def init(self, img, box):
    img_now = ops.read_image(img)
    self.target_sz = np.array([box[3], box[2]])
    self.pos = np.array([box[1], box[0]]) + self.target_sz / 2

    # window size, taking padding into account
    self.sz = pylab.floor(self.target_sz * (1 + self.padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    self.output_sigma = pylab.sqrt(pylab.prod(self.target_sz)) * self.output_sigma_factor

    grid_y = pylab.arange(self.sz[0]) - pylab.floor(self.sz[0] / 2)
    grid_x = pylab.arange(self.sz[1]) - pylab.floor(self.sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / self.output_sigma**2 * (rs**2 + cs**2))
    self.yf = pylab.fft2(y)

    # store pre-computed cosine window
    self.cos_window = pylab.outer(pylab.hanning(self.sz[0]),
                                  pylab.hanning(self.sz[1]))

    if img_now.ndim == 3:
        img_now = ops.rgb2gray(img_now)
    x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
    k = ops.dense_gauss_kernel(self.sigma, x)
    self.alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
    self.z = x
def apply_cos_window(channels):
    global cos_window
    if cos_window is None:
        cos_window = pylab.outer(pylab.hanning(channels.shape[1]),
                                 pylab.hanning(channels.shape[2]))
    return pylab.multiply(channels[:] - 0.5, cos_window)
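# A minimal, hypothetical driver for apply_cos_window() above. The function
# reads a module-level `cos_window` (built lazily on first call), so one must
# exist; the 3x64x48 channel stack here is illustrative only.
import numpy as np

cos_window = None
channels = np.random.rand(3, 64, 48)    # stack of feature channels in [0, 1]
windowed = apply_cos_window(channels)   # each channel shifted to [-0.5, 0.5], then windowed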
def mask(mags, fc):
    m = pl.zeros(N // 2 + 1)
    d = int((fc / 2) * N / sr)
    b = int(fc * N / sr)
    m[b - d:b + d] = pl.hanning(2 * d)
    mags *= m
    return mags
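# A usage sketch for mask() above, under the assumption that N (DFT size) and
# sr (sample rate) are module-level globals, as the function body implies.
import pylab as pl

N = 2048
sr = 44100
t = pl.arange(N) / sr
mags = abs(pl.rfft(pl.sin(2 * pl.pi * 440 * t) * pl.hanning(N)))
mags = mask(mags, 1000)   # keep a Hanning-shaped band centred on 1 kHz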
def calcul_coefficients(self):
    """Calculate coefficients for the chirplets.

    Returns: apodization coefficients
    """
    num_coeffs = linspace(0, self._duration, int(self._samplerate * self._duration))

    if self._polynome_degree:
        # polynomial chirp: phase term t * ((fmax - fmin) / ((p + 1) * d**p) * t**p + fmin),
        # which sweeps the instantaneous frequency from fmin at t=0 to fmax at t=d
        temp = (self._max_frequency - self._min_frequency)
        temp /= (self._polynome_degree + 1) * self._duration**self._polynome_degree
        temp = temp * num_coeffs**self._polynome_degree + self._min_frequency
        wave = cos(2 * pi * num_coeffs * temp)
    else:
        # exponential chirp: integral of fmin * (fmax/fmin)**(t/d)
        temp = (self._min_frequency *
                (self._max_frequency / self._min_frequency)**(num_coeffs / self._duration)
                - self._min_frequency)
        temp *= self._duration / log(self._max_frequency / self._min_frequency)
        wave = cos(2 * pi * temp)

    coeffs = wave * hanning(len(num_coeffs))**2
    return coeffs
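# A minimal sketch of calling calcul_coefficients(); since it is written as a
# method, a stand-in object carrying the attributes it reads is passed
# explicitly. All attribute values here are illustrative assumptions.
from types import SimpleNamespace

chirplet = SimpleNamespace(_duration=0.05, _samplerate=16000,
                           _min_frequency=200.0, _max_frequency=4000.0,
                           _polynome_degree=0)   # 0 selects the exponential branch
coeffs = calcul_coefficients(chirplet)           # Hanning^2-apodized chirp samples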
def _istftm(self, X_hat=None, Phi_hat=None, pvoc=False, usewin=True, resamp=None):
    """
    ::
        Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
        Uses phases from self.STFT if Phi_hat is None.

        Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]
            usewin - whether to use overlap-add [True]

        Returns:
            x_hat - estimated signal
    """
    if not self._have_stft:
        return None
    X_hat = P.np.abs(self.STFT) if X_hat is None else P.np.abs(X_hat)
    if pvoc:
        self._pvoc(X_hat, Phi_hat, pvoc)
    else:
        Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
        self.X_hat = X_hat * P.exp(1j * Phi_hat)
    if usewin:
        self.win = P.hanning(self.nfft)
        self.win *= 1.0 / ((float(self.nfft) * (self.win**2).sum()) / self.nhop)
    else:
        self.win = P.ones(self.nfft)
    if resamp:
        self.win = sig.resample(self.win, int(P.np.round(self.nfft * resamp)))
    fp = self._check_feature_params()
    self.x_hat = self._overlap_add(P.real(self.nfft * P.irfft(self.X_hat.T)),
                                   usewin=usewin, resamp=resamp)
    if self.verbosity:
        print("Extracted iSTFTM->self.x_hat")
    return self.x_hat
def _stft(self):
    if not self._have_x:
        print("Error: You need to load a sound file first: use self.load_audio('filename.wav')")
        return False
    fp = self._check_feature_params()
    num_frames = len(self.x)
    self.STFT = P.zeros((int(self.nfft / 2 + 1), num_frames), dtype='complex')
    self.win = P.ones(self.wfft) if self.window == 'rect' else P.np.sqrt(P.hanning(self.wfft))
    x = P.zeros(self.wfft)
    buf_frames = 0
    for k, nex in enumerate(self.x):
        x = self._shift_insert(x, nex, self.nhop)
        if self.nhop >= self.wfft - k * self.nhop:  # align buffer on start of audio
            self.STFT[:, k - buf_frames] = P.rfft(self.win * x, self.nfft).T
        else:
            buf_frames += 1
    self.STFT = self.STFT / self.nfft
    self._fftfrqs = P.arange(0, self.nfft / 2 + 1) * self.sample_rate / float(self.nfft)
    self._have_stft = True
    if self.verbosity:
        print("Extracted STFT: nfft=%d, hop=%d" % (self.nfft, self.nhop))
    self.inverse = self._istftm
    self.X = abs(self.STFT)
    if not self.magnitude:
        self.X = self.X**2
    return True
def time_fft(data2, samplerate=100., inverse=False, hann=False):
    """FFT (or inverse FFT) of a time series, optionally Hanning-windowed.

    Returns a dict with the frequency axis and the real, imaginary and
    absolute parts of the transform.
    """
    data = data2
    if hann:
        w1 = pl.hanning(len(data))
        data = data * w1

    # frequency axis: positive frequencies first, then the negative half
    n_half = len(data) // 2
    freqs = pl.arange(1 + n_half) / float(n_half) * samplerate / 2.
    if len(data) % 2 == 0:
        freqs = pl.concatenate((freqs, -freqs[1:(len(freqs) - 1)][::-1]))
    else:
        freqs = pl.concatenate((freqs, -freqs[1:len(freqs)][::-1]))

    response = pl.fft(data)
    if inverse:
        response = pl.ifft(data)

    return {'freq': freqs, 'real': response.real, 'im': response.imag,
            'abs': abs(response)}
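# A quick check of time_fft(): spectrum of a 5 Hz sine sampled at 100 Hz;
# the peak of the positive-frequency half should land near 5 Hz.
import pylab as pl

t = pl.arange(1024) / 100.0
out = time_fft(pl.sin(2 * pl.pi * 5 * t), samplerate=100., hann=True)
half = len(t) // 2 + 1
peak_hz = out['freq'][out['abs'][:half].argmax()]   # ~5 Hz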
def plot_signal(s, sr, start=0, end=440, N=32768):
    """Plots the waveform and spectrum of a signal s with sampling rate sr,
    from a start to an end position (waveform), and from a start position
    with an N-point DFT (spectrum)."""
    pl.figure(figsize=(8, 5))

    pl.subplot(211)
    sig = s[start:end]
    time = pl.arange(0, len(sig)) / sr
    pl.plot(time, sig, 'k-')
    pl.ylim(-1.1, 1.1)
    pl.xlabel("time (s)")

    pl.subplot(212)
    win = pl.hanning(N)
    scal = N * pl.sqrt(pl.mean(win**2))
    sig = s[start:start + N]
    window = pl.rfft(sig * win / max(sig))
    f = pl.arange(0, len(window))
    bins = f * sr / N
    mags = abs(window / scal)
    spec = 20 * pl.log10(mags / max(mags))
    pl.plot(bins, spec, 'k-')
    pl.ylim(-60, 1)
    pl.ylabel("amp (dB)", size=16)
    pl.xlabel("freq (Hz)", size=16)
    pl.yticks()
    pl.xticks()
    pl.xlim(0, sr / 2)
    pl.tight_layout()
    pl.show()
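# A usage sketch for plot_signal(): one second of a 440 Hz sine, long enough
# that both the waveform slice and the 32768-point spectrum are available.
import pylab as pl

sr = 44100
t = pl.arange(sr) / sr
plot_signal(pl.sin(2 * pl.pi * 440 * t), sr)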
def CrossSpectraDataAE(self):
    if not (hasattr(self, 'winfftA') and hasattr(self, 'winfftE')):
        print("\n No 'winfftA' and 'winfftE' available. Calculating them using a "
              "hanning window and fft the same length as the data...", end='')
        window = pylab.hanning(self.Length)
        self.windowAndFFT(window, self.Length)
        print('done')
    dataduration = self.Cadence * self.Length
    CAE = numpy.conj(self.winfftA) * self.winfftE * 2 / dataduration
    self.CAE = CAE
    return CAE
def initialize(self, image, pos, target_sz):
    if len(image.shape) == 3 and image.shape[2] > 1:
        image = rgb2gray(image)
    self.image = image
    if self.should_resize_image:
        self.image = scipy.misc.imresize(self.image, 0.5)
        self.image = self.image / 255.0

    # window size, taking padding into account
    self.sz = pylab.floor(target_sz * (1 + self.padding))
    self.pos = pos

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(self.sz)) * self.output_sigma_factor

    grid_y = pylab.arange(self.sz[0]) - pylab.floor(self.sz[0] / 2)
    grid_x = pylab.arange(self.sz[1]) - pylab.floor(self.sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    self.y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    self.yf = pylab.fft2(self.y)

    # store pre-computed cosine window
    self.cos_window = pylab.outer(pylab.hanning(self.sz[0]),
                                  pylab.hanning(self.sz[1]))

    # get subwindow at current estimated target position,
    # to train classifier
    x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

    # Kernel Regularized Least-Squares,
    # calculate alphas (in Fourier domain)
    k = dense_gauss_kernel(self.sigma, x)
    self.alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
    self.z = x
    return
def _istftm(self, X_hat=None, Phi_hat=None, pvoc=False, usewin=True, resamp=None):
    """
    ::
        Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
        Uses phases from self.STFT if Phi_hat is None.

        Inputs:
            X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
            Phi_hat - N/2+1 phase STFT [None=exp(1j*angle(self.STFT))]
            pvoc - whether to use phase vocoder [False]
            usewin - whether to use overlap-add [True]

        Returns:
            x_hat - estimated signal
    """
    if not self._have_stft:
        return None
    X_hat = self.X if X_hat is None else P.np.abs(X_hat)
    if pvoc:
        self._pvoc(X_hat, Phi_hat, pvoc)
    else:
        Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
        self.X_hat = X_hat * P.exp(1j * Phi_hat)
    if usewin:
        if self.win is None:
            self.win = P.ones(self.wfft) if self.window == 'rect' else P.np.sqrt(
                P.hanning(self.wfft))
        if len(self.win) != self.nfft:
            self.win = P.r_[self.win, P.np.zeros(self.nfft - self.wfft)]
        if len(self.win) != self.nfft:
            error.BregmanError(
                "features_base.Features._istftm(): assertion failed len(self.win)==self.nfft")
    else:
        self.win = P.ones(self.nfft)
    if resamp:
        self.win = sig.resample(self.win, int(P.np.round(self.nfft * resamp)))
    fp = self._check_feature_params()
    self.x_hat = self._overlap_add(P.real(P.irfft(self.X_hat.T)),
                                   usewin=usewin, resamp=resamp)
    if self.verbosity:
        print("Extracted iSTFTM->self.x_hat")
    return self.x_hat
def smoothed_histogram_window(ax, pdf, bins):
    wsize = 20
    bins = 0.5 * (bins[:-1] + bins[1:])
    # for wsize in range(5, 30, 5):
    # extend the data at the beginning and at the end
    # to apply the window at the borders
    ps = plt.r_[pdf[wsize - 1:0:-1], pdf, pdf[-1:-wsize:-1]]
    w = plt.hanning(wsize)
    pc = plt.convolve(w / w.sum(), ps, mode='valid')
    pc = pc[wsize // 2:len(ps) - wsize // 2]
    pc = pc[0:len(bins)]
    # plt.plot(bins, pc)
    # plt.fill_between(bins, 0, pc, alpha=0.1)
    return pc, bins
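# A minimal sketch exercising smoothed_histogram_window() on a synthetic
# histogram. The ax argument is unused by the function body, so None is
# passed; the function's `plt` is assumed to be a pylab-style namespace
# (it uses plt.r_, plt.hanning and plt.convolve).
import numpy as np
import pylab as plt

pdf, bins = np.histogram(np.random.randn(10000), bins=100, density=True)
pc, centers = smoothed_histogram_window(None, pdf, bins)
plt.plot(centers, pc)
plt.show()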
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """
    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):
        if (frame % 10) == 0:
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)
        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)
        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")
                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")
                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)
        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)
    return
def get_subwindow(im, pos, sz, cos_window):
    ys = pylab.floor(pos[0]) + pylab.arange(sz[0], dtype=int) - pylab.floor(sz[0] / 2)
    xs = pylab.floor(pos[1]) + pylab.arange(sz[1], dtype=int) - pylab.floor(sz[1] / 2)
    ys = ys.astype(int)
    xs = xs.astype(int)

    # check for out-of-bounds coordinates,
    # and set them to the values at the borders
    ys[ys < 0] = 0
    ys[ys >= im.shape[0]] = im.shape[0] - 1
    xs[xs < 0] = 0
    xs[xs >= im.shape[1]] = im.shape[1] - 1

    # extract the image patch cut out by the sub-window
    out = im[pylab.ix_(ys, xs)]

    # shift pixel values from [0, 1] to [-0.5, 0.5]
    out = out.astype(pylab.float64) - 0.5

    # apply the cosine window, Eq. (18) in the paper
    return pylab.multiply(cos_window, out)


if __name__ == '__main__':
    image_path = r'..\data\surfer\imgs'
    image_list = os.listdir(image_path)
    image = os.path.join(image_path, image_list[0])
    img = mpimg.imread(image)
    gray = rgb2gray.rgb2gray(rgb_image=img)
    position = np.array([152., 286.])
    size = np.array([35., 32.])
    cos_window = pylab.outer(pylab.hanning(size[0]), pylab.hanning(size[1]))
    result = get_subwindow(im=gray, pos=position, sz=size, cos_window=cos_window)
    print(pylab.hanning(size[0]))
    print(cos_window)
    plt.imshow(result)
    plt.show()
def track(input_video_path, show_tracking):
    """
    Note: variables ending with f are in the frequency domain.
    """
    # extra area surrounding the target
    padding = 1.0
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    # gaussian kernel bandwidth
    sigma = 0.2
    # regularization coefficient
    lambda_value = 1e-2
    # linear interpolation factor
    interpolation_factor = 0.075

    # load video info: the list of frames to test, the first-frame target-box
    # centre [y, x], half the box height/width, whether frames should be
    # halved in size, per-frame ground-truth data, and the video path
    info = load_video_info.load_video_info(input_video_path)
    img_files, pos, target_sz, should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    # coordinate grids for height and width, centred on the target box
    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)

    # turn the coordinate vectors into a 2-D grid over the window
    rs, cs = pylab.meshgrid(grid_x, grid_y)

    # Eq. (19) in the paper: values in [0, 1], largest at the centre
    # and falling off away from it
    y = pylab.exp((-0.5 / output_sigma ** 2) * (rs ** 2 + cs ** 2))

    # 2-D discrete Fourier transform
    yf = pylab.fft2(y)

    # Hanning (weighted cosine) windows for the box height and width;
    # their outer product gives the 2-D cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    # global z, response
    plot_tracking.z = None
    alphaf = None
    plot_tracking.response = None

    # iterate over the images in the file list
    for frame, image_filename in enumerate(img_files):
        if (frame % 10) == 0:
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)

        # convert colour images to grayscale
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray.rgb2gray(im)

        # resize the image to half size if requested
        # (PIL's resize expects (width, height), i.e. (cols, rows))
        if should_resize_image:
            im = np.array(Image.fromarray(im).resize(
                (int(im.shape[1] / 2), int(im.shape[0] / 2))))

        # start timing
        start_time = time.time()

        # extract and pre-process the cosine-windowed subwindow
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)
        # if this is not the first frame, compute the classifier response
        if not is_first_frame:
            # calculate the response of the classifier at all locations
            k = dense_gauss_kernel.dense_gauss_kernel(sigma, x, plot_tracking.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            plot_tracking.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # the maximum response gives the target position
            r = plot_tracking.response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")
                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")
                pylab.figure()
                pylab.imshow(plot_tracking.response)
                pylab.title("response")
                pylab.show(block=True)
        # end "if not first frame"

        # get the cosine-windowed patch at the target position
        # to train the classifier
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares: compute alpha in the Fourier domain
        k = dense_gauss_kernel.dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame: train on a single image
            alphaf = new_alphaf
            plot_tracking.z = x
        else:
            # subsequent frames: interpolate the model parameters
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            plot_tracking.z = (1 - f) * plot_tracking.z + f * new_z

        # save the current position and accumulate time for FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualize the tracking result
        if show_tracking == "yes":
            plot_tracking.plot_tracking(frame, pos, target_sz, im, ground_truth)

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precision plot
        show_precision.show_precision(positions, ground_truth, title)
import pylab as pl

T = 64
t = pl.arange(0, T)

pl.figure(figsize=(8, 3))
sig = pl.sin(2 * pl.pi * (T / 4) * t / T) * pl.hanning(T)
pl.ylim(0, 1.1)
pl.xlim(0, T // 2)
pl.xticks([x for x in range(0, T // 2 + 1, 4)])
pl.stem(t[0:T // 2], abs(pl.rfft(sig) / (T / 4))[0:T // 2], 'k-')
pl.tight_layout()
pl.show()
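# Note on the normalization above: the rfft of a unit sine at bin T/4 has
# magnitude T/2 there; the Hanning window halves that (0.5 at the centre bin,
# 0.25 leaked into each neighbour), so dividing by T/4 puts the peak at 1.0
# with values of 0.5 in the two adjacent bins.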
def init(self, img, rect):
    im_width = img.shape[1]
    im_height = img.shape[0]

    ys = pylab.floor(rect[1]) + pylab.arange(rect[3], dtype=int)
    xs = pylab.floor(rect[0]) + pylab.arange(rect[2], dtype=int)
    ys = ys.astype(int)
    xs = xs.astype(int)
    # check for out-of-bounds coordinates,
    # and set them to the values at the borders
    ys[ys < 0] = 0
    ys[ys >= img.shape[0]] = img.shape[0] - 1
    xs[xs < 0] = 0
    xs[xs >= img.shape[1]] = img.shape[1] - 1

    roi = self.get_imageROI(img, rect)

    self.init_frame = img.copy()
    self.canvas = img.copy()
    # pos is the center position of the tracked object (cy, cx)
    pos = pylab.array([rect[1] + rect[3] / 2, rect[0] + rect[2] / 2])

    self.pos_list = [pos]
    self.roi_list = [roi]
    self.rect_list = [rect]
    self.trackNo = 0

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target (the window is doubled by default)
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    self.sigma = 0.2  # gaussian kernel bandwidth
    self.lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    # self.interpolation_factor = 0.075
    self.interpolation_factor = 0.01

    self.scale_ratios = [0.985, 0.99, 0.995, 1.0, 1.005, 1.01, 1.015]

    # target_sz equals [rect[3], rect[2]]
    target_sz = pylab.array([int(rect[3]), int(rect[2])])
    # (extended) window size, taking padding into account
    window_sz = pylab.floor(target_sz * (1 + padding))
    self.window_sz = window_sz
    self.window_sz_new = window_sz
    self.target_sz = target_sz

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(window_sz[0]) - pylab.floor(window_sz[0] / 2)
    grid_x = pylab.arange(window_sz[1]) - pylab.floor(window_sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
    self.yf = pylab.fft2(y)

    # store pre-computed cosine window
    self.cos_window = pylab.outer(pylab.hanning(window_sz[0]),
                                  pylab.hanning(window_sz[1]))

    # get subwindow at current estimated target position, to train classifier
    x = self.get_subwindow(img, pos, window_sz)
    # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
    k = self.dense_gauss_kernel(self.sigma, x)
    # store the computed alphaf and z for the next frame's iteration
    self.alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
    self.z = x

    # return initialization status
    return True
import pylab as pl

sr = 44100
N = 2048
fc = 1000
w = 500

mask = pl.zeros(N // 2 + 1)
d = int(w * N / sr)
b = int(fc * N / sr)
mask[b - d:b + d] = pl.hanning(2 * d)

pl.figure(figsize=(8, 3))
pl.plot(pl.arange(0, N // 2 + 1) * sr / N, mask, 'k-', linewidth=2)
pl.xlim(0, 5000)
pl.tight_layout()
pl.show()
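# Worked bin arithmetic for the values above: b = int(1000 * 2048 / 44100) = 46
# and d = int(500 * 2048 / 44100) = 23, so the Hanning lobe occupies bins
# 23..69, i.e. roughly 495 Hz to 1486 Hz at sr/N ~ 21.5 Hz per bin.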
def init(self, img, rect):
    im_width = img.shape[1]
    im_height = img.shape[0]

    ys = pylab.floor(rect[1]) + pylab.arange(rect[3], dtype=int)
    xs = pylab.floor(rect[0]) + pylab.arange(rect[2], dtype=int)
    ys = ys.astype(int)
    xs = xs.astype(int)
    # check for out-of-bounds coordinates,
    # and set them to the values at the borders
    ys[ys < 0] = 0
    ys[ys >= img.shape[0]] = img.shape[0] - 1
    xs[xs < 0] = 0
    xs[xs >= img.shape[1]] = img.shape[1] - 1

    self.rect = rect  # rectangle containing the bounding box of the target
    # pos is the center position of the tracked object (cy, cx)
    self.pos = pylab.array([rect[1] + rect[3] / 2, rect[0] + rect[2] / 2])
    self.posOffset = np.array([0, 0], int)
    self.tlx = rect[0]
    self.tly = rect[1]
    self.trackNo = 0

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target (the window is doubled by default)
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    self.sigma = 0.2  # gaussian kernel bandwidth
    self.lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    self.interpolation_factor = 0.075

    # target_sz equals [rect[3], rect[2]]
    target_sz = pylab.array([int(rect[3]), int(rect[2])])
    # (extended) window size, taking padding into account
    window_sz = pylab.floor(target_sz * (1 + padding))
    self.window_sz = window_sz
    self.target_sz = target_sz

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(window_sz[0]) - pylab.floor(window_sz[0] / 2)
    grid_x = pylab.arange(window_sz[1]) - pylab.floor(window_sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    self.yf = pylab.fft2(y)

    # store pre-computed cosine window
    self.cos_window = pylab.outer(pylab.hanning(window_sz[0]),
                                  pylab.hanning(window_sz[1]))

    # get subwindow at current estimated target position, to train classifier
    x = self.get_subwindow(img, self.pos, window_sz, self.cos_window)
    # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
    k = self.dense_gauss_kernel(self.sigma, x)
    # store the computed alphaf and z for the next frame's iteration
    self.alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
    self.z = x

    # monitor the tracker's own status, based on the continuity of the PSR
    self.self_status = 0
    # monitor the collaborative status, based on the distance to the voted
    # object bounding-box center, and on the PSR as well
    self.collaborate_status = 5

    self.collabor_container = np.ones((10, 1), int)
    self.highpsr_container = np.ones((10, 1), int)
    self.FourRecentRects = np.zeros((4, 4), float)

    # return initialization status
    return True
import sys

import pylab as pl
from scipy.io import wavfile as wf

# imports above are assumed for this fragment; stft/istft, spec_env and p2r
# are helpers defined elsewhere in the source it comes from
N = 1024
D = 4
H = N // D
zdbfs = 32768

(sr, in1) = wf.read(sys.argv[1])
(sr, in2) = wf.read(sys.argv[2])
L1 = len(in1)
L2 = len(in2)
if L2 > L1:
    L = L2
else:
    L = L1

signal1 = pl.zeros(L)
signal2 = pl.zeros(L)
signal1[:len(in1)] = in1 / zdbfs
signal2[:len(in2)] = in2 / zdbfs
output = pl.zeros(L)
win = pl.hanning(N)
scal = 1.5 * D / 4

for n in range(0, L, H):
    if (L - n < N):
        break
    frame1 = stft(signal1[n:n + N], win, n)
    frame2 = stft(signal2[n:n + N], win, n)
    mags = abs(frame1)
    env1 = spec_env(frame1, 20)
    env2 = spec_env(frame2, 20)
    phs = pl.angle(frame1)
    if (min(env1) > 0):
        frame = p2r(mags * env2 / env1, phs)
    else:
        frame = p2r(mags * env2, phs)
    output[n:n + N] += istft(frame, win, n)
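# Design note: the loop above is a vocoder-style cross-synthesis. Dividing
# frame1's magnitudes by its own spectral envelope (env1, from spec_env with
# order 20) whitens the first source, and multiplying by env2 imposes the
# second source's envelope, while the phases (phs) stay with source 1. The
# env1 division is skipped when the envelope touches zero, which avoids a
# divide-by-zero at the cost of leaving source 1's envelope in place.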
def fts_fft(cts, optv, rate, pks2, length, band=[], hann=False, bolo=0,
            plt=False, absv=False, phaseb=[], chop=False, crange=[],
            notquiet=False):
    """FFT of FTS interferogram scans around each white-light peak.

    TC deconvolution still to be added.
    absv=True: the phase-corrected interferogram is built from both the real
    and imaginary parts of the FFT; otherwise just the real part.

    Returns a dict:
        {'freq': freq_a, 'real_FFT': real_a, 'im_FFT': im_a, 'abs': abs_a,
         'time_scan': timeout, 'whitel': pks, 'xint': xint_a, 'intf': int_a,
         'scan_length': scanl}
    """
    if len(phaseb) == 0:
        phaseb = band
    pks = pl.sort(pks2)
    pos = pl.arange(len(cts)) * optv / rate
    time = pks / rate

    for i in range(len(pks)):
        # grab the data within `length` of each white-light peak and make the
        # scan symmetric around the peak
        dd = pl.where((pos > (pos[pks[i]] - length)) & (pos < pos[pks[i]] + length))[0]
        d1 = pl.where(pos[dd] > (pos[pks[i]]))[0]
        d2 = pl.where(pos[dd] < (pos[pks[i]]))[0]
        mmin = min([len(d1), len(d2)])  # is one half shorter than the other?
        cts_range = pl.arange(2 * mmin) + pks[i] - mmin

        # take out the mean and slope:
        xxtemp = pl.arange(len(cts_range))
        rrtemp = pl.polyfit(xxtemp, cts[cts_range], 1)
        yr = cts[cts_range] - rrtemp[0] * xxtemp
        yr = yr - yr.mean()

        # need an even length for the FFT
        if len(yr) % 2 != 0:
            yr = yr[1:len(yr)]

        # deal with possible chopping now. Basic idea: find the chopper peak
        # frequency, take the FFT, and set this chopper peak to 0 frequency.
        # Use the negative side band as our signal (lose 1/2 the signal, but
        # then don't have to deal with a potentially large change in frequency
        # response between the two side bands).
        if chop != 0:
            if len(crange) == 0:
                chophi = 15
                choplow = 8
            else:
                chophi = crange[1]
                choplow = crange[0]

            # fit the chopped signal's FFT with a gaussian around the chopper peak
            def gaussian(x, a0, a1, a2, a3):
                # mimics IDL's gaussfit with nterms=4
                z = (x - a1) / a2
                return a0 * pl.exp(-(z**2) / 2) + a3

            qout = time_fft(yr, samplerate=rate, hann=True)
            q2 = time_fft((qout['real'] + 1j * qout['im']), inverse=True)  # does this work?
            fr = pl.where((qout['freq'] > choplow) & (qout['freq'] < chophi))[0]
            # note: this fit can have an infinite covariance matrix, which is not great
            fit, pcov = curve_fit(gaussian, qout['freq'][fr], qout['abs'][fr])
            pkf = fit[1]  # the peak response frequency
            chspec_f = 30.0 * pkf / optv  # where it maps to in GHz
            if notquiet:
                print("Chopper at " + str(pkf) + " Hz")
                print("Spectra: " + str(chspec_f) + " GHz")
                print("3rd Harmonic in subtracted scan at " + str(2 * chspec_f) + " GHz")

        # save the original for comparison with the created interferogram
        yout = yr
        if hann:
            w1 = pl.hanning(len(yr))
            yr = yr * w1

        # get the shifting right:
        yr = deque(yr)
        yr.rotate(int(-len(yr) / 2.0 + 1))
        yr = pl.array(yr)

        n = pl.arange(len(yr) / 2. + 1)
        n2 = pl.concatenate((n, -(n[1:len(n) - 1])[::-1]))  # should this be -2 or -1?
        icm = n2 / len(yr) / (optv / rate)
        icm0 = icm
        FFT_r = pl.fft(yr)
        icm2 = icm

        # now do the shifting and such with the chopper:
        if chop != 0:
            chspec_icm = chspec_f / 30.0
            icm = -1.0 * (icm - chspec_icm)
            if notquiet:
                pl.figure()
                pl.plot(30 * icm, abs(pl.fft(yr)), label='Not Demodulated')
                pl.plot(30 * icm2, abs(pl.fft(yr)), label='Lower Side Band')
                pl.plot(-30 * icm, abs(pl.fft(yr)), label='Upper Side Band')
                pl.xlim(50, 300)
                pl.title('Chopped data, abs value')
                pl.ylim(0, .5)

        # now take out a phase:
        phase = pl.arctan2(FFT_r.imag, FFT_r.real)
        tt = pl.where((icm > phaseb[0] / 30.) & (icm < phaseb[1] / 30.))[0]

        def linfit(x, a, b):
            return a + b * x

        if len(tt) == 1:
            r = [0, 0]
        else:
            # 'sigma' here was 'measure_errors' in IDL
            r, pcov = curve_fit(linfit, icm[tt], phase[tt],
                                sigma=1 / abs(FFT_r[tt])**2)

        # apply phase correction:
        pc = r[0] + r[1] * icm
        shift_c = pl.exp(-pc * 1j)
        FFT_r = FFT_r * shift_c
        FFT_net = FFT_r

        # create the interferogram to feed back:
        intf = create_interf(30 * icm0, pl.fft(yr).real)
        if absv:
            intf = create_interf(30 * icm, pl.fft(yr))
        xin = intf['x']
        intfa = intf['intf']
        length_int = [len(intfa)]

        # keep only positive frequencies:
        qq = pl.where(icm >= 0)
        icm = icm[qq]
        FFT_r = FFT_r[qq]
        # and sort them
        qqsort = icm.argsort()
        icm = icm[qqsort]
        FFT_r = FFT_r[qqsort]

        if len(band) == 0:
            band = [50, 700]
        ptitle = 'FTS Data for bolo ' + str(bolo)

        if i == 0:
            freqout = 30.0 * icm
            realout = (FFT_r[0:len(icm)]).real
            imout = (FFT_r[0:len(icm)]).imag
            sindex = pl.zeros(len(icm)) + i
            timeout = [time[i]]
            length_inter = length_int
            xint = xin
            intfg = intfa
            if plt:
                pl.figure()
                inband = pl.where((30 * icm > band[0]) & (30 * icm < band[1]))[0]
                if len(inband) == 0:
                    inband = pl.arange(len(icm))
                if absv == 0:
                    pl.plot(30 * icm[1:], (FFT_r[1:]).real / max((FFT_r[inband]).real),
                            'k-', label='%.4f cm' % (time[i] * optv))
                    pl.plot(30 * icm[1:], (FFT_r[1:]).imag / max((FFT_r[inband]).real), 'k--')
                    pl.xlabel('Frequency (GHz)')
                    pl.ylabel('Normalized Spectra')
                    pl.xlim(band[0], band[1])
                    pl.title(ptitle)
                    pl.ylim(-0.5, 1)
                if absv != 0:
                    pl.plot(30 * icm[1:], abs(FFT_r[1:]) / max(abs(FFT_r[inband])))
                    pl.xlabel('Frequency (GHz)')
                    pl.ylabel('Normalized Abs Value of Spectra')
                    pl.xlim(band)
                    pl.title(ptitle)
                    pl.ylim(-.1, 1)
        if i != 0:
            tfreqout = 30 * icm
            trealout = (FFT_r[0:len(icm)]).real
            timout = (FFT_r[0:len(icm)]).imag
            tsindex = pl.zeros(len(icm)) + i
            ttimeout = [time[i]]
            freqout = pl.concatenate((freqout, tfreqout))
            realout = pl.concatenate((realout, trealout))
            imout = pl.concatenate((imout, timout))
            sindex = pl.concatenate((sindex, tsindex))
            timeout = pl.concatenate((timeout, ttimeout))
            length_inter = pl.concatenate((length_inter, length_int))
            xint = pl.concatenate((xint, xin))
            intfg = pl.concatenate((intfg, intfa))
            if plt:
                inband = pl.where((30 * icm > band[0]) & (30 * icm < band[1]))[0]
                if len(inband) == 0:
                    inband = pl.arange(len(icm))
                if absv == 0:
                    pl.plot(30 * icm[1:], (FFT_r[1:]).real / max((FFT_r[inband]).real),
                            color=pl.cm.jet(.15 * i), label='%.4f cm' % (time[i] * optv))
                    pl.plot(30 * icm[1:], (FFT_r[1:]).imag / max((FFT_r[inband]).real),
                            '--', color=pl.cm.jet(.15 * i))
                if absv != 0:
                    pl.plot(30 * icm[1:], abs(FFT_r[1:]) / max(abs(FFT_r[inband])),
                            color=pl.cm.jet(.15 * i), label='%.4f cm' % (time[i] * optv))
                pl.legend(loc=4)
                pl.grid()

    # okay, let's get this result into some reasonable form:
    # figure out the lengths of each scan and pad arrays with zeros if necessary
    scanl = pl.histogram(sindex, bins=int(max(sindex) - min(sindex) + 1))[0]
    maxl = max(scanl)
    scan_index = pl.arange(len(pks))
    # IDL is column-major; these were all flipped from CxR to RxC
    freq_a = pl.zeros((maxl, len(pks)))
    real_a = pl.zeros((maxl, len(pks)))
    im_a = pl.zeros((maxl, len(pks)))
    abs_a = pl.zeros((maxl, len(pks)))

    for i in range(len(pks)):  # the below all comes from the column-focused IDL
        if i == 0:
            if maxl - scanl[i] != 0:
                zeros_to_add = pl.zeros(maxl - scanl[i])
                freq_a[:, i] = pl.concatenate((freqout[0:scanl[i]], zeros_to_add))
                real_a[:, i] = pl.concatenate((realout[0:scanl[i]], zeros_to_add))
                im_a[:, i] = pl.concatenate((imout[0:scanl[i]], zeros_to_add))
                abs_a[:, i] = pl.sqrt(real_a[:, i]**2 + im_a[:, i]**2)
                new_start = scanl[i]
            else:
                freq_a[:, i] = freqout[0:scanl[i]]
                real_a[:, i] = realout[0:scanl[i]]
                im_a[:, i] = imout[0:scanl[i]]
                abs_a[:, i] = pl.sqrt(real_a[:, i]**2 + im_a[:, i]**2)
                new_start = scanl[i]
        else:
            if maxl - scanl[i] != 0:
                zeros_to_add = pl.zeros(maxl - scanl[i])
                freq_a[:, i] = pl.concatenate((freqout[new_start:new_start + scanl[i]], zeros_to_add))
                real_a[:, i] = pl.concatenate((realout[new_start:new_start + scanl[i]], zeros_to_add))
                im_a[:, i] = pl.concatenate((imout[new_start:new_start + scanl[i]], zeros_to_add))
                abs_a[:, i] = pl.sqrt(real_a[:, i]**2 + im_a[:, i]**2)
                new_start = new_start + scanl[i]
            else:
                freq_a[:, i] = freqout[new_start:new_start + scanl[i]]
                real_a[:, i] = realout[new_start:new_start + scanl[i]]
                im_a[:, i] = imout[new_start:new_start + scanl[i]]
                abs_a[:, i] = pl.sqrt(real_a[:, i]**2 + im_a[:, i]**2)
                new_start = new_start + scanl[i]

    # now deal with the interferograms:
    maxl = int(max(length_inter))
    xint_a = pl.zeros((maxl, len(length_inter)))
    int_a = pl.zeros((maxl, len(length_inter)))
    for i in range(len(pks)):
        n_i = int(length_inter[i])
        if i == 0:
            if maxl - n_i != 0:
                zeros_to_add = pl.zeros(maxl - n_i)
                xint_a[:, i] = pl.concatenate((zeros_to_add, xint[0:n_i]))
                int_a[:, i] = pl.concatenate((zeros_to_add, intfg[0:n_i]))
                new_start = n_i
            else:
                xint_a[:, i] = xint[0:n_i]
                int_a[:, i] = intfg[0:n_i]
                new_start = n_i
        else:
            if maxl - n_i != 0:
                zeros_to_add = pl.zeros(maxl - n_i)
                xint_a[:, i] = pl.concatenate((zeros_to_add, xint[new_start:new_start + n_i]))
                int_a[:, i] = pl.concatenate((zeros_to_add, intfg[new_start:new_start + n_i]))
                new_start = new_start + n_i
            else:
                xint_a[:, i] = xint[new_start:new_start + n_i]
                int_a[:, i] = intfg[new_start:new_start + n_i]
                new_start = new_start + n_i

    result = {'freq': freq_a, 'real_FFT': real_a, 'im_FFT': im_a, 'abs': abs_a,
              'time_scan': timeout, 'whitel': pks, 'xint': xint_a, 'intf': int_a,
              'scan_length': scanl}
    return result