import numpy as np
import cupy as cp


def measure(wave, prob, pos, L):
    # there are two possible measurement outcomes
    choice = [0, 1]
    # determine whether to measure this site
    op = cp.random.choice(choice, 1, p=[1 - prob, prob])
    # if the measurement is chosen at the given position
    if op[0] == 1:
        # construct \sigma_z_i in the many-body basis
        temp = cp.ones(2**(L - pos - 1))
        pz = cp.concatenate((temp, -temp))
        # repeat the pattern for 2**pos times
        pz = cp.tile(pz, 2**pos)
        # projection of wavefunction
        temp = pz * wave
        pup1 = 0.5 * (wave + temp)
        pdown1 = 0.5 * (wave - temp)
        # expectation values
        # temp = (wave.conjugate().T).dot(temp)
        temp = cp.vdot(wave, temp)
        # bring the scalar to the host so it can be used as a probability below
        pup = 0.5 + 0.5 * float(temp.real)
        pdown = 1 - pup
        '''
        In case the wavefunction is close to a product state, the measurement
        might yield pup=1 or pdown=1. To avoid possible numerical errors where
        pup > 1, we manually set the probability to 1 or 0.
        '''
        if abs(pup - 1) < 1e-10:
            pup = 1.0
            pdown = 0.0
            wave = pup1
        elif abs(pup) < 1e-10:
            pup = 0.0
            pdown = 1.0
            wave = pdown1
        else:
            pdown = 1 - pup
            '''
            The probability of each measurement outcome is determined by the
            expectation value of the projection operator.
            '''
            out = np.random.choice([0, 1], 1, p=[pup, pdown])
            # if the measurement projects the spin onto the z-up state
            if out[0] == 0:
                # normalization of wavefunction
                wave = (1 / cp.sqrt(pup)) * pup1
            else:
                wave = (1 / cp.sqrt(pdown)) * pdown1
    return wave
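# Usage sketch (hypothetical, not part of the original code): builds a random
# normalized L-site state and sweeps measure() over every site. Assumes cupy is
# available as cp (imported above); demo_measure is an illustrative name.
def demo_measure(L=10, prob=0.1):
    # random normalized complex state on the 2**L dimensional Hilbert space
    wave = cp.random.standard_normal(2**L) + 1j * cp.random.standard_normal(2**L)
    wave = wave / cp.sqrt(cp.vdot(wave, wave).real)
    # attempt a measurement on each site with probability `prob`
    for pos in range(L):
        wave = measure(wave, prob, pos, L)
    # projective measurement keeps the state normalized
    return float(cp.vdot(wave, wave).real)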
def dot(self, other):
    # Checking whether the input is a vector or not
    if not isinstance(other, VectorCupy):
        raise TypeError("Provided input vector not a %s!" % self.whoami)
    # Checking size (must have same number of elements)
    if self.size != other.size:
        raise ValueError("Vector size mismatching: self = %d; other = %d" % (self.size, other.size))
    # Checking dimensionality
    if not self.checkSame(other):
        raise ValueError("Dimensionality not equal: self = %s; other = %s" % (self.shape, other.shape))
    if not self._check_same_device(other):
        raise ValueError('Provided input has to live in the same device')
    return cp.vdot(self.getNdArray().flatten(), other.getNdArray().flatten())
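# Minimal illustration (hypothetical snippet, not from VectorCupy): cp.vdot
# conjugates its first argument and treats both inputs as flat vectors, which
# is why dot() above flattens both operands before calling it.
a = cp.array([[1 + 2j, 3 + 0j], [0 + 1j, 2 - 1j]])
b = cp.array([[2 + 0j, 1 + 1j], [1 - 1j, 0 + 3j]])
inner = cp.vdot(a.flatten(), b.flatten())   # sum over conj(a_i) * b_i
manual = cp.sum(cp.conj(a) * b)             # the same value, written out explicitly
assert cp.allclose(inner, manual)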
def init(self, frame, bbox, total_frame=np.inf):
    """
    frame -- image
    bbox  -- need xmin, ymin, width, height
    """
    self._pos = np.array([bbox[1]+(bbox[3]-1)/2., bbox[0]+(bbox[2]-1)/2.], dtype=np.float32)
    self._target_sz = np.array([bbox[3], bbox[2]])
    self._num_samples = min(self.config.num_samples, total_frame)
    xp = cp if gpu_config.use_gpu else np

    # calculate search area and initial scale factor
    search_area = np.prod(self._target_sz * self.config.search_area_scale)
    if search_area > self.config.max_image_sample_size:
        self._current_scale_factor = np.sqrt(search_area / self.config.max_image_sample_size)
    elif search_area < self.config.min_image_sample_size:
        self._current_scale_factor = np.sqrt(search_area / self.config.min_image_sample_size)
    else:
        self._current_scale_factor = 1.

    # target size at the initial scale
    self._base_target_sz = self._target_sz / self._current_scale_factor

    # target size, taking padding into account
    if self.config.search_area_shape == 'proportional':
        self._img_sample_sz = np.floor(self._base_target_sz * self.config.search_area_scale)
    elif self.config.search_area_shape == 'square':
        self._img_sample_sz = np.ones((2), dtype=np.float32) * np.sqrt(
            np.prod(self._base_target_sz * self.config.search_area_scale))
    else:
        raise NotImplementedError("unimplemented")

    features = [feature for feature in self.config.features
                if ("use_for_color" in feature and feature["use_for_color"] == self._is_color)
                or "use_for_color" not in feature]

    self._features = []
    cnn_feature_idx = -1
    for idx, feature in enumerate(features):
        if feature['fname'] == 'cn' or feature['fname'] == 'ic':
            self._features.append(TableFeature(**feature))
        elif feature['fname'] == 'fhog':
            self._features.append(FHogFeature(**feature))
        elif feature['fname'] == 'gray':
            self._features.append(GrayFeature(**feature))
        elif feature['fname'].startswith('cnn'):
            cnn_feature_idx = idx
            netname = feature['fname'].split('-')[1]
            if netname == 'resnet50':
                self._features.append(ResNet50Feature(**feature))
            elif netname == 'vgg16':
                self._features.append(VGG16Feature(**feature))
        else:
            raise NotImplementedError("unimplemented features")
    self._features = sorted(self._features, key=lambda x: x.min_cell_size)

    # calculate image sample size
    if cnn_feature_idx >= 0:
        self._img_sample_sz = self._features[cnn_feature_idx].init_size(self._img_sample_sz)
    else:
        cell_size = [x.min_cell_size for x in self._features]
        self._img_sample_sz = self._features[0].init_size(self._img_sample_sz, cell_size)

    for idx, feature in enumerate(self._features):
        if idx != cnn_feature_idx:
            feature.init_size(self._img_sample_sz)

    if self.config.use_projection_matrix:
        sample_dim = [x for feature in self._features for x in feature._compressed_dim]
    else:
        sample_dim = [x for feature in self._features for x in feature.num_dim]
    feature_dim = [x for feature in self._features for x in feature.num_dim]
    feature_sz = np.array([x for feature in self._features for x in feature.data_sz], dtype=np.int32)

    # number of fourier coefficients to save for each filter layer, this will be an odd number
    filter_sz = feature_sz + (feature_sz + 1) % 2
    # the size of the label function DFT, equal to the maximum filter size
    self._k1 = np.argmax(filter_sz, axis=0)[0]
    self._output_sz = filter_sz[self._k1]
    self._num_feature_blocks = len(feature_dim)

    # get the remaining block indices
    self._block_inds = list(range(self._num_feature_blocks))
    self._block_inds.remove(self._k1)

    # how much each feature block has to be padded to obtain the output_sz
    self._pad_sz = [((self._output_sz - filter_sz_) / 2).astype(np.int32) for filter_sz_ in filter_sz]

    # compute the fourier series indices and their transposes
    self._ky = [np.arange(-np.ceil(sz[0]-1)/2, np.floor((sz[0]-1)/2)+1, dtype=np.float32)
                for sz in filter_sz]
    self._kx = [np.arange(-np.ceil(sz[1]-1)/2, 1, dtype=np.float32)
                for sz in filter_sz]

    # construct the gaussian label function using poisson formula
    sig_y = np.sqrt(np.prod(np.floor(self._base_target_sz))) * self.config.output_sigma_factor * \
        (self._output_sz / self._img_sample_sz)
    yf_y = [np.sqrt(2 * np.pi) * sig_y[0] / self._output_sz[0] *
            np.exp(-2 * (np.pi * sig_y[0] * ky_ / self._output_sz[0])**2) for ky_ in self._ky]
    yf_x = [np.sqrt(2 * np.pi) * sig_y[1] / self._output_sz[1] *
            np.exp(-2 * (np.pi * sig_y[1] * kx_ / self._output_sz[1])**2) for kx_ in self._kx]
    self._yf = [yf_y_.reshape(-1, 1) * yf_x_ for yf_y_, yf_x_ in zip(yf_y, yf_x)]
    if gpu_config.use_gpu:
        self._yf = [cp.asarray(yf) for yf in self._yf]
        self._ky = [cp.asarray(ky) for ky in self._ky]
        self._kx = [cp.asarray(kx) for kx in self._kx]

    # construct cosine window
    self._cos_window = [self._cosine_window(feature_sz_) for feature_sz_ in feature_sz]

    # compute fourier series of interpolation function
    self._interp1_fs = []
    self._interp2_fs = []
    for sz in filter_sz:
        interp1_fs, interp2_fs = self._get_interp_fourier(sz)
        self._interp1_fs.append(interp1_fs)
        self._interp2_fs.append(interp2_fs)

    # get the reg_window_edge parameter
    reg_window_edge = []
    for feature in self._features:
        if hasattr(feature, 'reg_window_edge'):
            reg_window_edge.append(feature.reg_window_edge)
        else:
            reg_window_edge += [self.config.reg_window_edge for _ in range(len(feature.num_dim))]

    # construct spatial regularization filter
    self._reg_filter = [self._get_reg_filter(self._img_sample_sz, self._base_target_sz, reg_window_edge_)
                        for reg_window_edge_ in reg_window_edge]

    # compute the energy of the filter (used for preconditioner)
    if not gpu_config.use_gpu:
        self._reg_energy = [np.real(np.vdot(reg_filter.flatten(), reg_filter.flatten()))
                            for reg_filter in self._reg_filter]
    else:
        self._reg_energy = [cp.real(cp.vdot(reg_filter.flatten(), reg_filter.flatten()))
                            for reg_filter in self._reg_filter]

    if self.config.use_scale_filter:
        self._scale_filter = ScaleFilter(self._target_sz, config=self.config)
        self._num_scales = self._scale_filter.num_scales
        self._scale_step = self._scale_filter.scale_step
        self._scale_factor = self._scale_filter.scale_factors
    else:
        # use the translation filter to estimate the scale
        self._num_scales = self.config.number_of_scales
        self._scale_step = self.config.scale_step
        scale_exp = np.arange(-np.floor((self._num_scales-1)/2), np.ceil((self._num_scales-1)/2)+1)
        self._scale_factor = self._scale_step**scale_exp

    if self._num_scales > 0:
        # force reasonable scale changes
        self._min_scale_factor = self._scale_step ** np.ceil(
            np.log(np.max(5 / self._img_sample_sz)) / np.log(self._scale_step))
        self._max_scale_factor = self._scale_step ** np.floor(
            np.log(np.min(frame.shape[:2] / self._base_target_sz)) / np.log(self._scale_step))

    # set conjugate gradient options
    init_CG_opts = {'CG_use_FR': True,
                    'tol': 1e-6,
                    'CG_standard_alpha': True}
    self._CG_opts = {'CG_use_FR': self.config.CG_use_FR,
                     'tol': 1e-6,
                     'CG_standard_alpha': self.config.CG_standard_alpha}
    if self.config.CG_forgetting_rate == np.inf or self.config.learning_rate >= 1:
        self._CG_opts['init_forget_factor'] = 0.
    else:
        self._CG_opts['init_forget_factor'] = (1 - self.config.learning_rate) ** self.config.CG_forgetting_rate

    # init and allocate
    self._gmm = GMM(self._num_samples, config=self.config)
    self._samplesf = [[]] * self._num_feature_blocks
    for i in range(self._num_feature_blocks):
        if not gpu_config.use_gpu:
            self._samplesf[i] = np.zeros((int(filter_sz[i, 0]), int((filter_sz[i, 1]+1)/2),
                                          sample_dim[i], self.config.num_samples), dtype=np.complex64)
        else:
            self._samplesf[i] = cp.zeros((int(filter_sz[i, 0]), int((filter_sz[i, 1]+1)/2),
                                          sample_dim[i], self.config.num_samples), dtype=cp.complex64)

    # allocate
    self._num_training_samples = 0

    # extract sample and init projection matrix
    sample_pos = mround(self._pos)
    sample_scale = self._current_scale_factor
    xl = [x for feature in self._features
          for x in feature.get_features(frame, sample_pos, self._img_sample_sz, self._current_scale_factor)]  # get features
    if gpu_config.use_gpu:
        xl = [cp.asarray(x) for x in xl]
    xlw = [x * y for x, y in zip(xl, self._cos_window)]              # do windowing
    xlf = [cfft2(x) for x in xlw]                                    # fourier series
    xlf = interpolate_dft(xlf, self._interp1_fs, self._interp2_fs)   # interpolate features
    xlf = compact_fourier_coeff(xlf)                                 # new sample to be added
    shift_sample_ = 2 * np.pi * (self._pos - sample_pos) / (sample_scale * self._img_sample_sz)
    xlf = shift_sample(xlf, shift_sample_, self._kx, self._ky)
    self._proj_matrix = self._init_proj_matrix(xl, sample_dim, self.config.proj_init_method)
    xlf_proj = self._proj_sample(xlf, self._proj_matrix)
    merged_sample, new_sample, merged_sample_id, new_sample_id = \
        self._gmm.update_sample_space_model(self._samplesf, xlf_proj, self._num_training_samples)
    self._num_training_samples += 1

    if self.config.update_projection_matrix:
        for i in range(self._num_feature_blocks):
            self._samplesf[i][:, :, :, new_sample_id:new_sample_id+1] = new_sample[i]

    # train_tracker
    self._sample_energy = [xp.real(x * xp.conj(x)) for x in xlf_proj]

    # init conjugate gradient param
    self._CG_state = None
    if self.config.update_projection_matrix:
        init_CG_opts['maxit'] = np.ceil(self.config.init_CG_iter / self.config.init_GN_iter)
        self._hf = [[[]] * self._num_feature_blocks for _ in range(2)]
        feature_dim_sum = float(np.sum(feature_dim))
        proj_energy = [2 * xp.sum(xp.abs(yf_.flatten())**2) / feature_dim_sum * xp.ones_like(P)
                       for P, yf_ in zip(self._proj_matrix, self._yf)]
    else:
        self._CG_opts['maxit'] = self.config.init_CG_iter
        self._hf = [[[]] * self._num_feature_blocks]

    # init the filter with zeros
    for i in range(self._num_feature_blocks):
        self._hf[0][i] = xp.zeros((int(filter_sz[i, 0]), int((filter_sz[i, 1]+1)/2),
                                   int(sample_dim[i]), 1), dtype=xp.complex64)

    if self.config.update_projection_matrix:
        # init Gauss-Newton optimization of the filter and projection matrix
        self._hf, self._proj_matrix = train_joint(self._hf,
                                                  self._proj_matrix,
                                                  xlf,
                                                  self._yf,
                                                  self._reg_filter,
                                                  self._sample_energy,
                                                  self._reg_energy,
                                                  proj_energy,
                                                  init_CG_opts,
                                                  self.config)
        # re-project and insert training sample
        xlf_proj = self._proj_sample(xlf, self._proj_matrix)
        # self._sample_energy = [np.real(x * np.conj(x)) for x in xlf_proj]
        for i in range(self._num_feature_blocks):
            self._samplesf[i][:, :, :, 0:1] = xlf_proj[i]

        # update the gram matrix since the sample has changed
        if self.config.distance_matrix_update_type == 'exact':
            # find the norm of the reprojected sample
            new_train_sample_norm = 0.
            for i in range(self._num_feature_blocks):
                new_train_sample_norm += 2 * xp.real(xp.vdot(xlf_proj[i].flatten(), xlf_proj[i].flatten()))
            self._gmm._gram_matrix[0, 0] = new_train_sample_norm

    self._hf_full = full_fourier_coeff(self._hf)

    if self.config.use_scale_filter and self._num_scales > 0:
        self._scale_filter.update(frame, self._pos, self._base_target_sz, self._current_scale_factor)
    self._frame_num += 1
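# Worked mini-example (illustrative only; the values below are made up, not
# taken from the tracker): the sizing logic in init() forces every filter size
# to be odd and pads each feature block symmetrically up to the largest block.
feature_sz_demo = np.array([[50, 50], [25, 25], [31, 31]], dtype=np.int32)
filter_sz_demo = feature_sz_demo + (feature_sz_demo + 1) % 2   # [[51,51],[25,25],[31,31]]
k1_demo = np.argmax(filter_sz_demo, axis=0)[0]                 # largest block -> index 0
output_sz_demo = filter_sz_demo[k1_demo]                       # [51, 51]
pad_sz_demo = [((output_sz_demo - fs) / 2).astype(np.int32) for fs in filter_sz_demo]
# -> [array([0, 0]), array([13, 13]), array([10, 10])]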