def randomMatrixPermutations(mat):
    """Return a matrix with the elements of one triangle permuted,
    without preserving the degrees.
    """
    # assumes array, ones, triu, find, permutation and unravel_index are
    # available at module level (e.g. via `from pylab import *`)
    s = mat.shape
    nruter = array(mat)
    indices = find(triu(ones(s), k=1))
    aleaInd = permutation(indices)
    nruter[unravel_index(indices, s)] = nruter[unravel_index(aleaInd, s)]
    # mirror the permuted upper triangle onto the lower triangle
    for i in range(s[0]):
        nruter[i:, i] = nruter[i, i:]
    return nruter
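# --- Minimal usage sketch for randomMatrixPermutations (not part of the
# original module). It assumes the pylab names used above are imported
# (e.g. `from pylab import *`, with a matplotlib version that still provides
# `find`); the example matrix `symMat` is made up for illustration.
def _demo_randomMatrixPermutations():
    from pylab import arange, allclose
    symMat = arange(16.0).reshape((4, 4))
    symMat = symMat + symMat.T          # make it symmetric
    permuted = randomMatrixPermutations(symMat)
    # off-diagonal values are shuffled, but the mirroring loop restores symmetry
    assert allclose(permuted, permuted.T)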
def findpeak(img):
    import pylab as plt
    import numpy as np  # used for the mask below
    # we define a mask to limit the number of point sources to 1
    mask = np.zeros((256, 256), dtype=int)
    mask[120:136, :] = 1
    img = img * mask
    # find the indices of the maximum pixel value
    y_max, x_max = plt.unravel_index(img.argmax(), img.shape)
    img_max = img.max()
    # define a threshold
    threshold = 0.15 * img_max
    pixsum = 0
    rowsum = 0
    colsum = 0
    # calculate a weighted average around the peak with a threshold
    for i in range(y_max - 7, y_max + 7):
        for j in range(x_max - 7, x_max + 7):
            if img[i, j] > threshold:
                pixsum = pixsum + img[i, j]
                rowsum = rowsum + img[i, j] * i
                colsum = colsum + img[i, j] * j
    x_c = rowsum / pixsum   # weighted row coordinate
    y_c = colsum / pixsum   # weighted column coordinate
    # return the coordinates of the peak
    return y_c, x_c
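# --- Hypothetical demo for findpeak (not in the original code): builds a
# synthetic 256x256 image with a single bright spot inside the masked band
# (rows 120-136) and recovers its weighted-centroid coordinates.
def _demo_findpeak():
    import numpy as np
    img = np.zeros((256, 256))
    yy, xx = np.mgrid[0:256, 0:256]
    # Gaussian spot centred at (row=128, col=200)
    img += np.exp(-((yy - 128) ** 2 + (xx - 200) ** 2) / (2 * 3.0 ** 2))
    y_c, x_c = findpeak(img)
    # first value is the column coordinate (~200), second the row (~128)
    print("peak near (col~200, row~128):", y_c, x_c)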
def update(self, img):
    img_now = ops.read_image(img)
    if img_now.ndim == 3:
        img_now = ops.rgb2gray(img_now)
    x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
    # print(x)
    k = ops.dense_gauss_kernel(self.sigma, x, self.z)
    kf = pylab.fft2(k)
    alphaf_kf = pylab.multiply(self.alphaf, kf)
    response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

    # target location is at the maximum response
    r = response
    row, col = pylab.unravel_index(r.argmax(), r.shape)
    self.pos = self.pos - pylab.floor(self.sz / 2) + [row, col]

    x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
    k = ops.dense_gauss_kernel(self.sigma, x)
    new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
    new_z = x

    f = self.interpolation_factor
    self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
    self.z = (1 - f) * self.z + f * new_z

    box_new = np.array([
        self.pos[1] - (self.sz[1]) / 2 + 1,
        self.pos[0] - (self.sz[0]) / 2 + 1,
        self.sz[1],
        self.sz[0]
    ], dtype=np.float32)
    return box_new
def triHemi(mat, h=0, ind=False, anti=False, sup=True, h2flat=True):
    try:
        N = len(mat)
    except TypeError:
        # mat may also be passed as an integer size
        N = mat
    if sup:
        fSI = triSup
    else:
        fSI = triInf
    if h == 0:
        return fSI(mat, ind=ind, anti=anti)
    elif h == 1:
        part = arange(N * N).reshape((N, N))
        # N // 2: integer division so the slices stay valid indices
        indices = r_[fSI(part[:N // 2, :N // 2], anti=anti),
                     fSI(part[N // 2:, N // 2:], anti=anti)]
        if ind:
            return indices
        else:
            return mat[unravel_index(indices, mat.shape)]
    elif h == 2:
        if sup:
            sli = getSlices(N, h=2)[2:4]
        else:
            sli = getSlices(N, h=2)[0:2]
        if ind:
            if h2flat:
                return arange(N * N).reshape((N, N))[sli].flatten()
            else:
                return arange(N * N).reshape((N, N))[sli]
        else:
            return mat[sli]
def CalculCentroid(self):
    if self.checkBoxAuto.isChecked() == True:
        dataF = gaussian_filter(self.data, 5)
        # take the position of the maximum
        (self.xec, self.yec) = pylab.unravel_index(dataF.argmax(), self.data.shape)
        self.vLine.setPos(self.xec)
        self.hLine.setPos(self.yec)
        self.roi1.setPos([self.xec - (self.r1x), self.yec - (self.r1y)])
        self.roi2.setPos([self.xec - (self.r2), self.yec - (self.r2)])
def TwoTri(m1, m2):
    if m1.ndim == 1:
        L = len(m1) * 2
        N = int(sqrt(L)) + 1
    else:
        N = len(m1)
    nruter = empty((N, N)) * NaN
    indS = triSup(nruter, ind=True)
    indI = triInf(nruter, ind=True)
    if m1.ndim == 1:
        v1, v2 = m1, m2
    else:
        v1, v2 = m1.take(indS), m2.take(indS)
    nruter[unravel_index(indS, nruter.shape)] = v1
    nruter[unravel_index(indI, nruter.shape)] = v2
    return nruter
def update_ret_response(self, new_img):
    '''
    :param new_img: new frame should be normalized, for tracker_status
                    estimating the rect_snr
    :return:
    '''
    self.canvas = new_img.copy()
    self.trackNo += 1

    # get subwindow at current estimated target position, to train classifier
    x = self.get_subwindow(new_img, self.pos, self.window_sz, self.cos_window)
    # calculate response of the classifier at all locations
    k = self.dense_gauss_kernel(self.sigma, x, self.z)
    kf = pylab.fft2(k)
    alphaf_kf = pylab.multiply(self.alphaf, kf)
    response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

    # target location is at the maximum response
    row, col = pylab.unravel_index(response.argmax(), response.shape)
    # roi rect's topleft point add [row, col]
    self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

    # here the pos is not given to self.pos at once, we need to check the psr first.
    # if it is above the threshold (default is 5), self.pos = pos.
    pos = np.array([self.tly, self.tlx]) + np.array([row, col])

    # Note: pos is (cy, cx), while a cv2 rect is (x, y, w, h)!
    rect = pylab.array([
        pos[1] - self.target_sz[1] / 2,
        pos[0] - self.target_sz[0] / 2,
        self.target_sz[1],
        self.target_sz[0]
    ])
    rect = rect.astype(int)

    self.psr, self.trkStatus = self.tracker_status(col, row, response, rect, new_img)
    self.pos = pos

    # only update when tracker_status's psr is high
    if (self.psr > 10):
        # computing new_alphaf and observed x as z
        x = self.get_subwindow(new_img, self.pos, self.window_sz, self.cos_window)
        # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
        k = self.dense_gauss_kernel(self.sigma, x)
        new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        new_z = x
        # subsequent frames, interpolate model
        f = self.interpolation_factor
        self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
        self.z = (1 - f) * self.z + f * new_z

    ok = 1
    return ok, rect, self.psr, response
def random2HemiPermutations(mat):
    """Return a matrix with the triangle elements permuted separately within
    each hemisphere block and across hemispheres, without preserving the degrees.
    """
    s = mat.shape
    N = s[0]
    N2 = N // 2               # integer half-size
    L = N * (N - 2) // 8      # upper-triangle size of one hemisphere block, N2*(N2-1)/2
    nruter = array(mat)
    Sind = triSup(
        arange(N2 * N2).reshape((N2, N2)) +
        arange(0, N2 * N2, N2).reshape((N2, 1)))
    Mind = (arange(N2 * N2).reshape((N2, N2)) +
            arange(N2, N2 * N2 + 1, N2).reshape((N2, 1))).reshape(N2 * N2)
    Iind = triSup(ones((N, N)), ind=1)[-L:]
    Salea = permutation(Sind)
    Malea = permutation(Mind)
    Ialea = permutation(Iind)
    nruter[unravel_index(Sind, s)] = nruter[unravel_index(Salea, s)]
    nruter[unravel_index(Mind, s)] = nruter[unravel_index(Malea, s)]
    nruter[unravel_index(Iind, s)] = nruter[unravel_index(Ialea, s)]
    # mirror the permuted upper triangle onto the lower triangle
    for i in range(s[0]):
        nruter[i:, i] = nruter[i, i:]
    return nruter
def Coupe(self):
    if self.maxGraphBox.isChecked() == True:
        dataF = gaussian_filter(self.data, 5)
        # take the max; ndimage.measurements.center_of_mass(dataF) could be used instead
        (self.xc, self.yc) = pylab.unravel_index(dataF.argmax(), self.data.shape)
        self.vLine.setPos(self.xc)
        self.hLine.setPos(self.yc)

    xxx = np.arange(0, int(self.dimx), 1)
    yyy = np.arange(0, int(self.dimy), 1)
    coupeX = self.data[int(self.xc), :]
    coupeXMax = np.max(coupeX)
    dataCross = self.data[int(self.xc), int(self.yc)]
    self.label_Cross.setText('x=' + str(int(self.xc)) + ' y=' + str(int(self.yc))
                             + ' value=' + str(dataCross))
    if coupeXMax == 0:  # avoid division by zero
        coupeXMax = 1
    coupeXnorm = (self.data.shape[0] / 10) * (coupeX / coupeXMax)  # normalize the curves
    self.curve2.setData(30 + coupeXnorm, yyy, clear=True)

    coupeY = self.data[:, int(self.yc)]
    coupeYMax = np.max(coupeY)
    if coupeYMax == 0:
        coupeYMax = 1
    coupeYnorm = (self.data.shape[1] / 10) * (coupeY / coupeYMax)
    self.curve3.setData(xxx, 20 + coupeYnorm, clear=True)

    # print fwhm on the X and Y curves if max > 20 counts
    xCXmax = np.amax(coupeXnorm)  # max
    if xCXmax > 20:
        fwhmX = self.fwhm(yyy, coupeXnorm, order=3)
        if fwhmX is None:
            self.textX.setText('')
        else:
            self.textX.setText('fwhm=' + str(fwhmX))
        # yCXmax = yyy[coupeXnorm.argmax()]
        # self.textX.setPos(xCXmax-3, yCXmax+3)

    yCYmax = np.amax(coupeYnorm)  # max
    if yCYmax > 20:
        fwhmY = self.fwhm(xxx, coupeYnorm, order=3)
        # xCYmax = xxx[coupeYnorm.argmax()]
        if fwhmY is None:
            self.textY.setText('', color='w')
        else:
            self.textY.setText('fwhm=' + str(fwhmY), color='w')
def triToMat(N, tri=True):
    '''Return the indices i, j of the upper right triangle array (triSup)
    as a 2*L np.array.
    '''
    if tri:
        # L = N * (N-1) / 2
        # nruter = zeros((2, L), dtype=int)
        # k = 0
        # for i in range(N-1):
        #     for j in range(i+1, N):
        #         nruter[:, k] = [i, j]
        #         k += 1
        nruter = array(unravel_index(triSup(N, ind=1), (N, N)))
    else:
        # nruter = zeros((2, N**2), dtype=int)
        # k = 0
        # for i in range(N):
        #     for j in range(N):
        #         nruter[:, k] = [i, j]
        #         k += 1
        nruter = array(unravel_index(arange(N**2), (N, N)))
    return nruter
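# --- Hypothetical demo for triToMat (not in the original module). With
# tri=False the result only needs numpy-style names (array, unravel_index,
# arange), here assumed to come from `from pylab import *`.
def _demo_triToMat():
    ij = triToMat(3, tri=False)
    # ij has shape (2, 9): ij[0] are row indices, ij[1] are column indices
    # ij[0] == [0, 0, 0, 1, 1, 1, 2, 2, 2]
    # ij[1] == [0, 1, 2, 0, 1, 2, 0, 1, 2]
    print(ij)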
def find2(z):
    """
    2d version of find, returning the row and column indices that match
    the criteria for the 2D array z

    Examples
    --------
    >>> A = array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
    >>> rowIdxs, colIdxs = find2(A > 3)
    """
    inds = find(z)
    rowInds, colInds = unravel_index(inds, z.shape)
    return rowInds, colInds
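# --- Hypothetical demo for find2 (not in the original code); assumes `find`,
# `unravel_index` and `array` are in scope (e.g. `from pylab import *`, with a
# matplotlib version that still provides `find`).
def _demo_find2():
    from pylab import array
    A = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
    rowIdxs, colIdxs = find2(A > 3)
    # every element of rows 1-3 is > 3, scanned in row-major order:
    # rowIdxs == [1, 1, 1, 2, 2, 2, 3, 3, 3]
    # colIdxs == [0, 1, 2, 0, 1, 2, 0, 1, 2]
    print(rowIdxs, colIdxs)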
def measure4(self):
    try:
        power_meter = Thorlabs_PM100D("PM100D")
        stage = TranslationalStage_3Axes('COM3', 'COM4')
    except:
        print("Exception raised: Devices not available")
        return
    to_limit_speed = 2
    stage.AG_UC2_1.set_step_amplitude(1, self.sc.side_step_amplitude)
    stage.AG_UC2_1.set_step_amplitude(1, -self.sc.side_step_amplitude)
    stage.AG_UC2_1.set_step_amplitude(2, self.sc.bw_step_amplitude)
    stage.AG_UC2_1.set_step_amplitude(2, -self.sc.bw_step_amplitude)
    stage.AG_UC2_2.set_step_amplitude(1, self.sc.up_step_amplitude)
    stage.AG_UC2_2.set_step_amplitude(1, -self.sc.up_step_amplitude)
    stage.AG_UC2_1.move_to_limit(1, -to_limit_speed)
    stage.AG_UC2_1.move_to_limit(2, -to_limit_speed)
    stage.AG_UC2_1.print_step_amplitudes()
    stage.AG_UC2_2.print_step_amplitudes()

    # acquire plane in yz-plane
    self.fd.intens_yz = zeros((self.sc.bw_steps, self.sc.side_steps))
    for i in range(0, self.sc.bw_steps):
        for j in range(0, self.sc.side_steps):
            if self.wants_abort:
                return
            self.fd.intens_yz[i, j] = power_meter.getPower()
            stage.left(self.sc.side_steps_per_move)
        stage.backwards(self.sc.bw_steps_per_move)
        stage.AG_UC2_1.move_to_limit(1, -to_limit_speed)
    self.fd.intens_yz = self.fd.intens_yz  # this is to update the array for a traits callback

    # acquire vertical plane
    stage.AG_UC2_1.move_to_limit(2, -to_limit_speed)
    # find index of the max intensity
    max_index = unravel_index(self.fd.intens_yz.argmax(), self.fd.intens_yz.shape)
    stage.left(max_index[1])
    print(max_index)
    stage.AG_UC2_2.move_to_limit(1, -to_limit_speed)
    self.fd.intens_xz = zeros((self.sc.bw_steps, self.sc.up_steps))
    for i in range(0, self.sc.bw_steps):
        for j in range(0, self.sc.up_steps):
            if self.wants_abort:
                return
            self.fd.intens_xz[i, j] = power_meter.getPower()
            stage.up(self.sc.up_steps_per_move)
        stage.backwards(self.sc.bw_steps_per_move)
        stage.AG_UC2_2.move_to_limit(1, -to_limit_speed)
    self.fd.intens_xz = self.fd.intens_xz
def Coupe(self):
    # make plot profile on cross
    if self.maxGraphBox.isChecked() == True and self.bloqKeyboard == False:
        # find and fix the cross on the maximum of the image
        dataF = gaussian_filter(self.data, 5)
        # take the max; ndimage.measurements.center_of_mass(dataF) could be used instead
        (self.xc, self.yc) = pylab.unravel_index(dataF.argmax(), self.data.shape)
        self.vLine.setPos(self.xc)
        self.hLine.setPos(self.yc)

    dataCross = self.data[int(self.xc), int(self.yc)]
    coupeX = self.data[int(self.xc), :]
    coupeY = self.data[:, int(self.yc)]
    xxx = np.arange(0, int(self.dimx), 1)
    yyy = np.arange(0, int(self.dimy), 1)
    coupeXMax = np.max(coupeX)
    coupeYMax = np.max(coupeY)

    if coupeXMax == 0:  # avoid division by zero
        coupeXMax = 1
    if coupeYMax == 0:
        coupeYMax = 1

    self.label_Cross.setText('x=' + str(int(self.xc)) + ' y=' + str(int(self.yc)))

    dataCross = round(dataCross, 3)  # take data value on the cross
    self.label_CrossValue.setText(' v.=' + str(dataCross))

    coupeXnorm = (self.data.shape[0] / 10) * (coupeX / coupeXMax)  # normalize the curves
    self.curve2.setData(20 + self.xminR + coupeXnorm, yyy, clear=True)

    coupeYnorm = (self.data.shape[1] / 10) * (coupeY / coupeYMax)
    self.curve3.setData(xxx, 20 + self.yminR + coupeYnorm, clear=True)
def displayStats(self):
    if self.meas is not None:
        maxPos = pl.unravel_index(self.meas.argmax(), self.meas.shape)
        maxVal = self.meas[maxPos]
        minVal = pl.amin(self.meas)
        amplitude = float(maxVal - minVal)
        self.ui.lcdAmplitude.display('{:.0f}'.format(amplitude))

        xVector = self.meas[maxPos[0], :].astype(float) - amplitude / 2
        yVector = self.meas[:, maxPos[1]].astype(float) - amplitude / 2
        # with interlaced camera in binning mode, pixels are size 2 in height
        if self.ccd.ccdParams['isInterlaced'] and not self.ilAcq:
            yVector = pl.array([yVector, yVector]).flatten('F')
            maxPos = (2 * maxPos[0], maxPos[1])

        xFWHM, _, _ = findFWHM(xVector, maxPos=maxPos[1], amplitude=amplitude)
        yFWHM, _, _ = findFWHM(yVector, maxPos=maxPos[0], amplitude=amplitude)
        FWHM = pl.mean([xFWHM, yFWHM])
        self.ui.lcdFwhm.display('{:.2f}'.format(FWHM))

        if maxVal == 2**16 - 1:
            self.toggleSatIndicator(True)
        else:
            self.toggleSatIndicator(False)

        thresholdArray = copy.deepcopy(self.meas)
        low_values_indices = thresholdArray < 2 * minVal
        thresholdArray[low_values_indices] = 0
        comPos = center_of_mass(thresholdArray)  # [::-1]
        if self.ccd.ccdParams['isInterlaced'] and not self.ilAcq:
            comPos = (comPos[0], 2 * comPos[1])
        self.verticalLineMax.setValue(comPos)
        self.horizontalLineMax.setValue(comPos)
def binned_srand_internal(sa, bedg1, bedg2):
    sa2 = sa - diag(diag(sa))
    nb1 = len(bedg1)
    nb2 = len(bedg2)
    k_out_a = sa2.T.sum(0)
    k_in_a = sa2.sum(0)
    k_1 = k_out_a
    k_2 = k_in_a
    i1, j1 = unravel_index(find(sa2), sa2.shape)
    n_1_2 = zeros((nb1, nb2))
    for i in range(len(i1)):
        kc1 = k_1[i1[i]]
        kc2 = k_2[j1[i]]
        if (kc1 * kc2) > 0:
            b1 = min(find((bedg1 - kc1) > 0)) - 1
            b2 = min(find((bedg2 - kc2) > 0)) - 1
            n_1_2[b1, b2] += 1
    return n_1_2
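# --- Hypothetical demo for binned_srand_internal (not in the original code).
# Assumes `array`, `find`, `diag`, `zeros` and `unravel_index` are in scope
# (e.g. via `from pylab import *`, with a matplotlib version that still
# provides `find`).
def _demo_binned_srand_internal():
    from pylab import array
    sa = array([[0, 1, 1],
                [0, 0, 1],
                [0, 0, 0]])          # directed edges: 0->1, 0->2, 1->2
    bedg = array([0, 1, 2, 3])       # degree bin edges
    counts = binned_srand_internal(sa, bedg, bedg)
    # out-degrees are [2, 1, 0], in-degrees are [0, 1, 2], so the edges fall
    # into bins (2, 1), (2, 2) and (1, 2) of the 4x4 count matrix
    print(counts)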
def find(self, image):
    if self.should_resize_image:
        self.image = scipy.misc.imresize(image, 0.5)
        self.image = self.image / 255.0  # hack around scipy
    else:
        self.image = image

    # get subwindow at current estimated target position,
    # to train classifier
    x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

    # calculate response of the classifier at all locations
    k = dense_gauss_kernel(self.sigma, x, self.z)
    kf = pylab.fft2(k)
    alphaf_kf = pylab.multiply(self.alphaf, kf)
    self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

    # target location is at the maximum response
    r = self.response
    self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
    self.pos = self.pos - pylab.floor(self.sz / 2) + [self.row, self.col]

    return self.pos
def find(self, image):
    if len(image.shape) == 3 and image.shape[2] > 1:
        image = rgb2gray(image)
    self.image = image
    if self.should_resize_image:
        self.image = scipy.misc.imresize(self.image, 0.5)
        self.image = self.image / 255.0

    # get subwindow at current estimated target position,
    # to train classifier
    x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

    # calculate response of the classifier at all locations
    k = dense_gauss_kernel(self.sigma, x, self.z)
    kf = pylab.fft2(k)
    alphaf_kf = pylab.multiply(self.alphaf, kf)
    self.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

    # target location is at the maximum response
    r = self.response
    self.row, self.col = pylab.unravel_index(r.argmax(), r.shape)
    self.pos = self.pos - pylab.floor(self.sz / 2) + [self.row, self.col]

    return self.pos
def track(input_video_path, show_tracking):
    """
    Note: variables ending with f are in the frequency domain.
    """
    # extra area surrounding the target
    padding = 1.0
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    # gaussian kernel bandwidth
    sigma = 0.2
    # regularization
    lambda_value = 1e-2
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    # load the video information: list of frames to test, centre [y, x] of the
    # first-frame target box, half the box height/width, whether to downscale
    # the images by two, the per-frame ground truth, and the video path
    info = load_video_info.load_video_info(input_video_path)
    img_files, pos, target_sz, should_resize_image, ground_truth, video_path = info

    # window size, taking the padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired gaussian-shaped output, bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    # coordinate grids centred on the target box
    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # mesh the coordinate lists into a 2D grid
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    # Eq. (19) in the paper: values in [0, 1], largest at the centre
    y = pylab.exp((-0.5 / output_sigma ** 2) * (rs ** 2 + cs ** 2))
    # 2D discrete Fourier transform
    yf = pylab.fft2(y)

    # cosine window of the target box: outer product of the Hanning windows
    # over the box height and width
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    # global z, response
    plot_tracking.z = None
    alphaf = None
    plot_tracking.response = None

    # iterate over the frames in the image list
    for frame, image_filename in enumerate(img_files):

        if (frame % 10) == 0:
            print("Processing frame", frame)

        # load the image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        # convert colour images to grayscale
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray.rgb2gray(im)
        # downscale by two if requested
        if should_resize_image:
            im = np.array(Image.fromarray(im).resize(
                (int(im.shape[0] / 2), int(im.shape[1] / 2))))

        # start timing
        start_time = time.time()

        # extract and pre-process the subwindow with the cosine window
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)

        # for all frames after the first, compute the classifier response
        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel.dense_gauss_kernel(sigma, x, plot_tracking.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            plot_tracking.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = plot_tracking.response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")
                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")
                pylab.figure()
                pylab.imshow(plot_tracking.response)
                pylab.title("response")
                pylab.show(block=True)
        # end "if not first frame"

        # get the cosine-windowed subwindow at the new position to train the classifier
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares: calculate alphas in the Fourier domain
        k = dense_gauss_kernel.dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            plot_tracking.z = x
        else:
            # subsequent frames, interpolate the model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            plot_tracking.z = (1 - f) * plot_tracking.z + f * new_z

        # save the current position and accumulate time for FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualize the tracking result
        if show_tracking == "yes":
            plot_tracking.plot_tracking(frame, pos, target_sz, im, ground_truth)

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # plot the precision curve
        show_precision.show_precision(positions, ground_truth, title)
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """
    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    # print("yf.shape ==", yf.shape)
    # print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if True and ((frame % 10) == 0):
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        # print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")
                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")
                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)
        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
def Display(self, data):
    '''Display data with visualisation module
    '''
    if self.multi == True:
        self.wait(0.1)
    self.data = data
    self.visualisation.newDataReceived(self.data)
    self.imageReceived = True
    self.datareceived.emit(True)
    time.sleep(0.01)
    if self.CAM.camIsRunnig == False:
        self.stopAcq()

    if self.loop == True:
        if self.closeLoop.isChecked():
            # position of the reference cross (the aimed point)
            self.xr = self.visualisation.xc  # int(self.conf.value(self.CAM.nbcam+"/xc"))
            self.yr = self.visualisation.yc  # int(self.conf.value(self.CAM.nbcam+"/yc"))
            # tolerance circle size
            self.xlim = self.visualisation.rx / 2  # int(self.conf.value(self.nbcam+"/rx"))/2
            self.ylim = self.visualisation.ry / 2  # int(self.conf.value(self.nbcam+"/ry"))/2
            self.dimy = np.shape(self.data)[1]
            self.dimx = np.shape(self.data)[0]
            self.summ = round(data.sum(), 3)
            self.maxSum = self.dimy * self.dimx * 255 / 3  # if a third of the camera saturates
            self.maxx = round(self.data.max(), 3)
            dataF = gaussian_filter(self.data, 5)
            thresholded_image = np.copy(dataF)
            threshold = 0.1
            # remove any possible offset
            minn = thresholded_image.min()
            thresholded_image -= minn
            # remove all values less than threshold*max
            minn = int(self.maxx * threshold)
            np.place(thresholded_image, thresholded_image < minn, 0)
            # self.xec, self.yec = ndimage.center_of_mass(thresholded_image)
            (self.xec, self.yec) = pylab.unravel_index(thresholded_image.argmax(), self.data.shape)
            self.vLine.setPos(self.xec)
            self.hLine.setPos(self.yec)
            self.deltaX = int(self.xr) - int(self.xec)
            self.deltaY = int(self.yr) - int(self.yec)

            if self.maxx < 30 or self.summ > self.maxSum:
                print('signal too low or too high')
                self.nbImage = 0
            else:
                self.closeLoopRadio.setChecked(True)
                if (abs(self.deltaX) >= self.xlim or abs(self.deltaY) > self.ylim) and self.nbImage == self.nbImageMax:
                    # print('xec', self.xec, self.yec, self.xr, self.yr)
                    self.deltaXMoy = int(self.xr) - int(np.mean(self.Xec))
                    if abs(self.deltaXMoy) >= self.xlim and (abs(self.deltaXMoy) < self.maxMvtX):
                        print('X move the', time.strftime("%Y %m %d %H %M %S"), 'of ', self.deltaXMoy * self.pasX)
                        if self.motor.inv[0] == True:
                            self.motor.MOT[0].rmove(-self.deltaXMoy * self.pasX)
                        else:
                            self.motor.MOT[0].rmove(self.deltaXMoy * self.pasX)
                    self.deltaYMoy = int(self.yr) - int(np.mean(self.Yec))
                    if abs(self.deltaYMoy) > self.ylim and (abs(self.deltaYMoy) < self.maxMvtY):
                        print('Y move the', time.strftime("%Y %m %d %H %M %S"), 'of', self.deltaYMoy * self.pasY)
                        if self.motor.inv[1] == True:
                            # fixed: the original referenced the undefined self.deltaMoy here
                            self.motor.MOT[1].rmove(-self.deltaYMoy * self.pasY)
                        else:
                            self.motor.MOT[1].rmove(self.deltaYMoy * self.pasY)
                    self.nbImage = 0
                    self.Xec = []
                    self.Yec = []
                elif (abs(self.deltaX) >= self.xlim or abs(self.deltaY) > self.ylim) and self.nbImage < self.nbImageMax:
                    self.Xec.append(self.xec)
                    self.Yec.append(self.yec)
                    self.nbImage = self.nbImage + 1
                else:
                    self.nbImage = 0
                    self.Xec = []
                    self.Yec = []
                    self.closeLoopRadio.setChecked(False)
        else:
            self.closeLoopRadio.setChecked(False)
def update(self, new_img):
    '''
    :param new_img: new frame should be normalized, for tracker_status
                    estimating the rect_snr
    :return:
    '''
    self.canvas = new_img.copy()
    self.trackNo += 1

    # get subwindow at current estimated target position, to train classifier
    x = self.get_subwindow(new_img, self.pos, self.window_sz, self.cos_window)
    # calculate response of the classifier at all locations
    k = self.dense_gauss_kernel(self.sigma, x, self.z)
    kf = pylab.fft2(k)
    alphaf_kf = pylab.multiply(self.alphaf, kf)
    response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9
    self.response = response
    self.responsePeak = np.max(response)

    # target location is at the maximum response
    row, col = pylab.unravel_index(response.argmax(), response.shape)
    # roi rect's topleft point add [row, col]
    self.tly, self.tlx = self.pos - pylab.floor(self.window_sz / 2)

    # here the pos is not given to self.pos at once, we need to check the psr first.
    # if it is above the threshold (default is 5), self.pos = pos.
    pos = np.array([self.tly, self.tlx]) + np.array([row, col])

    # Note: pos is (cy, cx), while a cv2 rect is (x, y, w, h)!
    rect = pylab.array([
        pos[1] - self.target_sz[1] / 2,
        pos[0] - self.target_sz[0] / 2,
        self.target_sz[1],
        self.target_sz[0]
    ])
    rect = rect.astype(int)
    self.rect = rect

    self.psr, self.trkStatus = self.tracker_status(col, row, response, rect, new_img)
    self.pos = pos

    # # bad quality tracking results
    # if self.psr <= 5 and self.trackNo >= 5:
    #     # computing offset based on the last 4 frames' obj_bbox centers,
    #     # using the average center shift as the (offset_x, offset_y)
    #     dif_rect = []
    #     for iter in [-1, -2, -3]:
    #         dif_rect.append(np.array(self.FourRecentRects[iter]) - np.array(self.FourRecentRects[iter - 1]))
    #     offset_rect = np.mean(dif_rect, 0)
    #     offset = (offset_rect[0] + offset_rect[2] / 2, offset_rect[1] + offset_rect[3] / 2)
    #     print('Tracker offset is activated (%d, %d)' % (offset[0], offset[1]))
    #     self.pos = self.pos + np.array([offset[1], offset[0]])
    #     # rect = pylab.array([self.pos[1] - self.target_sz[1] / 2, self.pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
    #     # rect = rect.astype(np.int)
    #     # self.FourRecentRects[self.trackNo % 4] = rect
    # else:
    #     self.pos = pos
    #     self.FourRecentRects[self.trackNo % 4] = rect

    # if self.psr <= 5:
    #     # computing offset based on the last 4 frames' obj_bbox centers,
    #     # using the average center shift as the (offset_x, offset_y)
    #     # self.pos = self.pos + self.posOffset
    #     print(self)
    #     print('Tracker default offset is activated (%d, %d)' % (self.posOffset[1], self.posOffset[0]))
    #     # rect = pylab.array([self.pos[1] - self.target_sz[1] / 2, self.pos[0] - self.target_sz[0] / 2, self.target_sz[1], self.target_sz[0]])
    #     # rect = rect.astype(np.int)
    #     # self.FourRecentRects[self.trackNo % 4] = rect
    # else:
    #     self.pos = pos
    #     self.FourRecentRects[self.trackNo % 4] = rect
    #     if self.trackNo >= 5:
    #         dif_rect = []
    #         for iter in [-1, -2, -3]:
    #             dif_rect.append(np.array(self.FourRecentRects[iter]) - np.array(self.FourRecentRects[iter - 1]))
    #         offset_rect = np.mean(dif_rect, 0)
    #         offset = (offset_rect[0] + offset_rect[2] / 2, offset_rect[1] + offset_rect[3] / 2)
    #         self.posOffset = np.array([offset[1], offset[0]])

    # print('tracker status: res_win_ave, max, psr, rect_snr', self.trkStatus)

    # if debug == True:
    #     if self.trackNo == 1:
    #         # pylab.ion()  # interactive mode on
    #         self.fig, self.axes = pylab.subplots(ncols=3)
    #         self.fig.show()
    #         # We need to draw the canvas before we start animating...
    #         self.fig.canvas.draw()
    #         k_img = self.axes[0].imshow(k, animated=True)
    #         x_img = self.axes[1].imshow(x, animated=True)
    #         r_img = self.axes[2].imshow(response, animated=True)
    #         self.subimgs = [k_img, x_img, r_img]
    #         # Let's capture the background of the figure
    #         self.backgrounds = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]
    #         pylab.show(block=False)
    #     else:
    #         self.subimgs[0].set_data(k)
    #         self.subimgs[1].set_data(x)
    #         self.subimgs[2].set_data(response)
    #         items = enumerate(zip(self.subimgs, self.axes, self.backgrounds), start=1)
    #         for j, (subimg, ax, background) in items:
    #             self.fig.canvas.restore_region(background)
    #             ax.draw_artist(subimg)
    #             self.fig.canvas.blit(ax.bbox)
    #         pylab.show(block=False)

    # only update when tracker_status's psr is high
    if (self.psr > 10):
        # computing new_alphaf and observed x as z
        x = self.get_subwindow(new_img, self.pos, self.window_sz, self.cos_window)
        # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
        k = self.dense_gauss_kernel(self.sigma, x)
        new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        new_z = x
        # subsequent frames, interpolate model
        f = self.interpolation_factor
        self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
        self.z = (1 - f) * self.z + f * new_z

    ok = 1
    return ok, rect, self.psr, response
        args = sc.objdict({'i': i, 'j': j, 'r': r, 'incub': incub})
        arglist.append(args)

tmp_results = sc.parallelize(run_sim, iterarg=arglist)
for tmp in tmp_results:
    results[tmp.i, tmp.j] = tmp.loglike

sc.toc()


#%% Plotting

pl.figure(figsize=(12, 8))
delta_r = (r_vec[1] - r_vec[0]) / 2
delta_i = (i_vec[1] - i_vec[0]) / 2
plot_r_vec = pl.hstack([r_vec - delta_r, r_vec[-1] + delta_r]) * 30 * 3  # TODO: estimate better from sim
plot_i_vec = pl.hstack([i_vec - delta_i, i_vec[-1] + delta_i])
pl.pcolormesh(plot_i_vec, plot_r_vec, results, cmap=sc.parulacolormap())
# pl.imshow(results)
pl.colorbar()
pl.title('Log-likelihood')
pl.xlabel('Days from exposure to infectiousness')
pl.ylabel('R0')

max_like_ind = pl.argmax(results)
indices = pl.unravel_index(max_like_ind, results.shape)
pl.scatter(indices[0], indices[1], marker='*', s=100, c='black', label='MLE')
pl.legend()
pl.savefig('log-likelihood-example.png')

print('Done.')
def track(descriptor):
    global options
    desc_channel_count = descriptor.initialize(options.use_gpu)

    roi = loader.track_bounding_box_from_first_frame()
    roi = [
        roi[0] + roi[2] / 2, roi[1] + roi[3] / 2, roi[2], roi[3],
        roi[2] * (1 + kcf_params.padding), roi[3] * (1 + kcf_params.padding)
    ]

    output_sigma = pylab.sqrt(pylab.prod([roi[3], roi[2]])) * kcf_params.output_sigma_factor

    avg_count = 0

    global cos_window
    cos_window = None
    template = [None for i in range(desc_channel_count)]
    alpha_f = [None for i in range(desc_channel_count)]
    response = [None for i in range(desc_channel_count)]
    yf = None

    track_time = 0
    full_track_time = time.time()
    while loader.has_next_frame():
        im = loader.next_frame()

        if (loader.frame_number() % 10) == 0:
            print("Processing frame {}".format(loader.frame_number()))

        start_time = time.time()

        is_first_frame = loader.frame_number() == 0

        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)
        dmv = None

        if is_first_frame:
            grid_y = pylab.arange(subwindow.shape[1]) - pylab.floor(subwindow.shape[1] / 2)
            grid_x = pylab.arange(subwindow.shape[2]) - pylab.floor(subwindow.shape[2] / 2)

            rs, cs = pylab.meshgrid(grid_x, grid_y)
            y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
            yf = pylab.fft2(y)
        else:
            for i in range(0, subwindow.shape[0]):
                channel = subwindow[i, :, :]

                # calculate response of the classifier at all locations
                k = dense_gauss_kernel(kcf_params.sigma, channel, template[i])
                kf = pylab.fft2(k)
                alphaf_kf = pylab.multiply(alpha_f[i], kf)
                response[i] = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

                # argmax = response[i].argmax()
                #
                # if response[i].item(argmax) != 0:
                #     tmp = pylab.unravel_index(argmax, response[i].shape)
                #     if value < response[i][tmp[0], tmp[1]]:
                #         avg_x = tmp[1]
                #         avg_y = tmp[0]
                #         avg_count = 1
                #         value = response[i][tmp[0], tmp[1]]
                #         chosen_i = i

            # // 2: integer indices are required for slicing the channel tensor
            anchor = torch.tensor(channels[:, channels.shape[1] // 2,
                                           channels.shape[2] // 2]).unsqueeze(0)
            points = torch.tensor(response).view(channels.shape[0], -1).t()

            dmv = distance_matrix_vector(anchor, points).view(channels.shape[1], channels.shape[2])

            argmax = np.array(dmv).argmax()
            tmp = pylab.unravel_index(argmax, subwindow.shape[1:])
            moved_by = [
                float(tmp[0]) - float(subwindow.shape[1]) / 2,
                float(tmp[1]) - float(subwindow.shape[2]) / 2
            ]
            roi = descriptor.update_roi(roi, moved_by)

        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)

        for i in range(0, subwindow.shape[0]):
            channel = subwindow[i, :, :]

            k = dense_gauss_kernel(kcf_params.sigma, channel)
            new_alpha_f = pylab.divide(yf, (pylab.fft2(k) + kcf_params.lambda_value))  # Eq. 7
            new_template = channel

            if is_first_frame:
                alpha_f[i] = new_alpha_f
                template[i] = new_template
            else:
                f = kcf_params.interpolation_factor
                alpha_f[i] = (1 - f) * alpha_f[i] + f * new_alpha_f
                template[i] = (1 - f) * template[i] + f * new_template

        track_time += time.time() - start_time

        results.log_tracked(im, roi, False, template[0], dmv)
    # end of "for each image in video"

    results.log_meta("speed.frames_tracked", loader.frame_number())
    results.log_meta("speed.track_no_io_time", str(track_time) + "s")
    results.log_meta("speed.track_no_io_fps", loader.frame_number() / track_time)
    results.log_meta("speed.track_no_init_time", str(time.time() - full_track_time) + "s")

    results.show_precision()

    return
def fuse_trackers(tracker_list):
    '''
    fuse all the trackers to get the estimated bounding box, bb.
    :param tracker_list:
    :return fused_bb:
    '''
    num_t = len(tracker_list)

    # 1 compute the tlx, tly, brx, bry of the fused region
    tlx, tly = (10000000, 1000000)
    brx, bry = (0, 0)
    for tracker in tracker_list:
        if tlx > tracker.tlx:
            tlx = int(tracker.tlx)
        if tly > tracker.tly:
            tly = int(tracker.tly)
        if brx < tracker.tlx + tracker.window_sz[1]:
            brx = int(tracker.tlx + tracker.window_sz[1])
        if bry < tracker.tly + tracker.window_sz[0]:
            bry = int(tracker.tly + tracker.window_sz[0])

    fuse_layers = np.zeros(((bry - tly), (brx - tlx), num_t))
    for i in range(num_t):
        offset_x = int(tracker_list[i].tlx) - tlx
        offset_y = int(tracker_list[i].tly) - tly
        rw = int(tracker_list[i].window_sz[1])
        rh = int(tracker_list[i].window_sz[0])
        fuse_layers[offset_y:offset_y + rh, offset_x:offset_x + rw, i] = tracker_list[i].response

    '''
    draw multiple layers on the plotlib
    '''
    ml_matrix = np.zeros(((bry - tly), (brx - tlx)))
    for x in range(brx - tlx):
        for y in range(bry - tly):
            ml = 1.
            for i, tracker in enumerate(tracker_list):
                ofx = (tlx + x) - int(tracker.tlx)
                ofy = (tly + y) - int(tracker.tly)
                rw = int(tracker.window_sz[1])
                rh = int(tracker.window_sz[0])
                # using offset to locate ml value in the response matrix
                if (ofx >= 0 and ofy >= 0) and (ofx < rw and ofy < rh):
                    ml *= tracker.response[ofy, ofx]
                else:
                    # out of tracker.response region, assign little value
                    ml *= np.spacing(1)
            ml_matrix[y, x] = ml

    # target location is at the maximum of the ml matrix
    est_y, est_x = pylab.unravel_index(ml_matrix.argmax(), ml_matrix.shape)
    # est_tlx = est_x - int(ml_matrix.shape[1]/2) + tlx
    # est_tly = est_y - int(ml_matrix.shape[0]/2) + tly

    # target scale is at the maximum response(est_y, est_x)
    ml_wh = np.spacing(1)
    est_w, est_h = 0, 0
    for i, tracker in enumerate(tracker_list):
        ofx = (tlx + est_x) - int(tracker.tlx)
        ofy = (tly + est_y) - int(tracker.tly)
        rw = int(tracker.window_sz[1])
        rh = int(tracker.window_sz[0])
        # using offset to locate ml value in the response matrix
        if (ofx >= 0 and ofy >= 0) and (ofx < rw and ofy < rh):
            if (ml_wh < tracker.response[ofy, ofx]):
                ml_wh = tracker.response[ofy, ofx]
                est_h, est_w = int(tracker.target_sz[0]), int(tracker.target_sz[1])

    est_bb = [
        int((est_x - est_w / 2) + tlx),
        int((est_y - est_h / 2) + tly),
        est_w, est_h
    ]
    return est_bb
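# --- Hypothetical demo for fuse_trackers (not in the original code). Two fake
# trackers with overlapping 5x5 response maps are fused; the attribute names
# mirror those used above, the sizes are made up, and `np`/`pylab` are assumed
# to be imported at module level.
def _demo_fuse_trackers():
    from types import SimpleNamespace
    import numpy as np

    def fake_tracker(tlx, tly, peak_yx):
        resp = np.full((5, 5), 0.1)
        resp[peak_yx] = 1.0
        return SimpleNamespace(tlx=tlx, tly=tly,
                               window_sz=np.array([5, 5]),   # (h, w)
                               target_sz=np.array([3, 3]),
                               response=resp)

    trackers = [fake_tracker(0, 0, (2, 2)), fake_tracker(1, 1, (1, 1))]
    # both peaks coincide at image coordinate (y=2, x=2), so the fused
    # [x, y, w, h] box is placed around that point
    print(fuse_trackers(trackers))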
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """
    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    # print("yf.shape ==", yf.shape)
    # print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if True and ((frame % 10) == 0):
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        # print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        if debug:
            pylab.figure()
            pylab.imshow(x)
            pylab.title("sub window")

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")
                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")
                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)
        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
def Display(self, data):
    self.maxx = round(data.max(), 3)
    self.minn = round(data.min(), 3)
    self.summ = round(data.sum(), 3)
    self.moy = round(data.mean(), 3)
    (self.xmax, self.ymax) = pylab.unravel_index(data.argmax(), data.shape)
    # print(self.maxx, data[int(self.xmax), int(self.ymax)])
    (self.xcmass, self.ycmass) = ndimage.center_of_mass(data)
    self.xcmass = round(self.xcmass, 3)
    self.ycmass = round(self.ycmass, 3)
    self.xs = data.shape[0]
    self.ys = data.shape[1]

    self.table.setRowCount(self.shoot + 1)
    self.table.setItem(self.shoot, 0, QTableWidgetItem(str(self.nomFichier)))
    self.table.setItem(self.shoot, 1, QTableWidgetItem(str(self.maxx)))
    self.table.setItem(self.shoot, 2, QTableWidgetItem(str(self.minn)))
    self.table.setItem(self.shoot, 3, QTableWidgetItem(str(self.xmax)))
    self.table.setItem(self.shoot, 4, QTableWidgetItem(str(self.ymax)))
    self.table.setItem(self.shoot, 5, QTableWidgetItem(str(self.summ)))
    self.table.setItem(self.shoot, 6, QTableWidgetItem(str(self.moy)))
    self.table.setItem(self.shoot, 7, QTableWidgetItem((str(self.xs) + '*' + str(self.ys))))
    self.table.setItem(self.shoot, 8, QTableWidgetItem(str(self.xcmass)))
    self.table.setItem(self.shoot, 9, QTableWidgetItem(str(self.ycmass)))

    if self.confMot != None:
        if self.motor == 'Motors':
            Posi = self.shoot
            self.label = 'Shoot'
        else:
            Posi = (self.MOT.position()) / self.unitChange
            self.label = self.motor + '(' + self.unitName + ')'
        self.table.setItem(self.shoot, 10, QTableWidgetItem(str(Posi)))
    else:
        Posi = self.shoot
        self.label = 'Shoot'

    self.table.selectRow(self.shoot)
    self.posMotor.append(Posi)
    self.table.resizeColumnsToContents()
    self.labelsVert.append('%s' % self.shoot)
    self.TableSauv.append(
        '%s,%.1f,%.1f,%i,%i,%.1f,%.3f,%.2f,%.2f,%.2f,%.2f,%.2f' %
        (self.nomFichier, self.maxx, self.minn, self.xmax, self.ymax,
         self.summ, self.moy, self.xs, self.ys, self.xcmass, self.ycmass, Posi))
    self.Maxx.append(self.maxx)
    self.Minn.append(self.minn)
    self.Summ.append(self.summ)
    self.Mean.append(self.moy)
    self.Xmax.append(self.xmax)
    self.Ymax.append(self.ymax)
    self.Xcmass.append(self.xcmass)
    self.Ycmass.append(self.ycmass)
    self.table.setVerticalHeaderLabels(self.labelsVert)

    # update the open plot windows
    if self.winCoupeMax.isWinOpen == True:
        self.PlotMAX()  # (self.Maxx, axis=self.posMotor, symbol=self.symbol, pen=None, label=self.motor)
    if self.winCoupeMin.isWinOpen == True:
        self.PlotMIN()
    if self.winCoupeXmax.isWinOpen == True:
        self.PlotXMAX()
    if self.winCoupeYmax.isWinOpen == True:
        self.PlotYMAX()
    if self.winCoupeSum.isWinOpen == True:
        self.PlotSUM()
    if self.winCoupeMean.isWinOpen == True:
        self.PlotMEAN()
    if self.winCoupeXcmass.isWinOpen == True:
        self.PlotXCMASS()
    if self.winCoupeYcmass.isWinOpen == True:
        self.PlotYCMASS()

    # update zoom windows
    if self.winZoomMax.isWinOpen == True:
        self.ZoomMAX()
    if self.winZoomSum.isWinOpen == True:
        self.ZoomSUM()
    if self.winZoomMean.isWinOpen == True:
        self.ZoomMEAN()
    if self.winZoomXmax.isWinOpen == True:
        self.ZoomXmaX()
    if self.winZoomYmax.isWinOpen == True:
        self.ZoomYmAX()
    if self.winZoomCxmax.isWinOpen == True:
        self.ZoomCxmaX()
    if self.winZoomCymax.isWinOpen == True:
        self.ZoomCymAX()

    self.shoot += 1
def update(self, new_img):
    self.canvas = new_img.copy()
    self.trackNo += 1

    res_max = 0.
    for scale_rate in self.scale_ratios:
        template_size = scale_rate * self.window_sz_new
        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(new_img, self.pos_list[-1], template_size)
        # calculate response of the classifier at all locations
        k = self.dense_gauss_kernel(self.sigma, x, self.z)
        kf = pylab.fft2(k)
        alphaf_kf = pylab.multiply(self.alphaf, kf)
        response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

        # target location is at the maximum response
        r = response
        row, col = pylab.unravel_index(r.argmax(), r.shape)
        if res_max < np.max(r):
            res_row = int(row * scale_rate)
            res_col = int(col * scale_rate)
            self.window_sz_new = template_size
            self.target_sz = self.target_sz * scale_rate
            res_ave, res_max, self.psr = self.response_win_ave_max(response, col, row, winsize=12)
            self.scale_rate = scale_rate

    # roi rect's topleft point add [row, col]
    pos = self.pos_list[-1] - pylab.floor(self.window_sz_new / 2) + [res_row, res_col]
    rect = pylab.array([
        pos[1] - self.target_sz[1] / 2,
        pos[0] - self.target_sz[0] / 2,
        self.target_sz[1],
        self.target_sz[0]
    ])
    rect = rect.astype(int)
    # print(self.target_sz, self.psr, self.scale_rate)

    if debug:
        if self.trackNo == 1:
            # pylab.ion()  # interactive mode on
            self.fig, self.axes = pylab.subplots(ncols=3)
            self.fig.show()
            # We need to draw the canvas before we start animating...
            self.fig.canvas.draw()

            k_img = self.axes[0].imshow(k, animated=True)
            x_img = self.axes[1].imshow(x, animated=True)
            r_img = self.axes[2].imshow(response, animated=True)

            self.subimgs = [k_img, x_img, r_img]
            # Let's capture the background of the figure
            self.backgrounds = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]

            # tracking_rectangle = pylab.Rectangle((0, 0), 0, 0)
            # tracking_rectangle.set_color((1, 0, 0, 0.5))
            # tracking_figure_axes.add_patch(tracking_rectangle)
            #
            # gt_point = pylab.Circle((0, 0), radius=5)
            # gt_point.set_color((0, 0, 1, 0.5))
            # tracking_figure_axes.add_patch(gt_point)
            # tracking_figure_title = tracking_figure.suptitle("")

            pylab.show(block=False)
            # self.fig.show()
        else:
            self.subimgs[0].set_data(k)
            self.subimgs[1].set_data(x)
            self.subimgs[2].set_data(response)
            items = enumerate(zip(self.subimgs, self.axes, self.backgrounds), start=1)
            for j, (subimg, ax, background) in items:
                self.fig.canvas.restore_region(background)
                ax.draw_artist(subimg)
                self.fig.canvas.blit(ax.bbox)
            pylab.show(block=False)

    if self.psr > 10:
        # computing new_alphaf and observed x as z
        x = self.get_subwindow(new_img, pos, self.window_sz_new)
        # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
        k = self.dense_gauss_kernel(self.sigma, x)
        new_alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        new_z = x
        # subsequent frames, interpolate model
        f = self.interpolation_factor
        self.alphaf = (1 - f) * self.alphaf + f * new_alphaf
        self.z = (1 - f) * self.z + f * new_z

    self.roi_list.append(self.get_imageROI(new_img, rect))
    self.pos_list.append(pos)
    self.rect_list.append(rect)
    ok = 1
    return ok, rect, self.psr