def push_back(self, I):
    """ Compute features for current image, push to stack """
    h, w = I.shape[:2]
    self.h = h
    self.w = w

    # Convert to a single-channel image, optionally with CLAHE contrast enhancement.
    if self.params['features_clahe'] > 0:
        Ibw = clahe.convert_bw(I)
    else:
        if I.ndim > 2:
            # The original code used the raw constant 7 (== cv2.COLOR_RGB2GRAY).
            Ibw = cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)
        else:
            Ibw = I

    for i, s in enumerate(self.scale_ranges):
        hn = int(np.round(h / s))
        wn = int(np.round(w / s))
        # WORKAROUNDS for some quirks in libviso2.
        # We need to make sure that the image has an odd number of rows and an
        # even number of columns.
        hn = hn - (hn % 2 == 0)
        wn = wn - (wn % 2 == 1)
        I_ = cv2.resize(Ibw, (wn, hn))
        self.matchers[i].pushBack(I_)
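# --- Illustration only, not part of the original code: a minimal, hedged sketch of
# the libviso2 size workaround used in push_back above. The helper name
# `_libviso2_image_size` is invented for this example; it just reproduces the
# odd-rows / even-columns adjustment so it can be sanity-checked in isolation.
def _libviso2_image_size(h, w, s):
    """Return the (rows, cols) that would be handed to libviso2 for an (h, w) image at scale s."""
    import numpy as np
    hn = int(np.round(h / s))
    wn = int(np.round(w / s))
    hn = hn - (hn % 2 == 0)  # force an odd number of rows
    wn = wn - (wn % 2 == 1)  # force an even number of columns
    return hn, wn

# Example: _libviso2_image_size(480, 640, 2.0) -> (239, 320)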
def push_back(self, I):
    """ Compute features for current image, push to stack """
    h, w = I.shape[:2]
    self.h = h
    self.w = w

    # Optional CLAHE contrast enhancement before feature extraction.
    if self.params['features_clahe'] > 0:
        if I.ndim > 2:
            I_ = clahe.convert_color(I)
        else:
            I_ = clahe.convert_bw(I)
    else:
        I_ = I.copy()
    print(I_.dtype)

    kp_in_opencv, desc = self.extractor.detectAndCompute(I_, None)
    kp_in = np.array([P.pt for P in kp_in_opencv])

    if self.params['features_prune_border'] > 0:
        # Drop keypoints closer than prune * (width, height) to the image border.
        prune = self.params['features_prune_border']
        border_x = w * prune
        border_y = h * prune
        ind_valid = np.logical_and(
            np.logical_and(kp_in[:, 0] >= border_x, kp_in[:, 0] <= w - border_x),
            np.logical_and(kp_in[:, 1] >= border_y, kp_in[:, 1] <= h - border_y))
        kp_in = kp_in[ind_valid, :]
        # Clunky filtering of python list
        # (Doing this via list(array(...)[inds]) is much slower).
        kp_in_opencv = [kp_in_opencv[i]
                        for i in range(len(kp_in_opencv)) if ind_valid[i]]
        desc = desc[ind_valid, :]

    #kp,desc = self.brisk_descriptor_extractor.compute(I_,kp_in_opencv)
    #if I_.ndim > 2:
    #    kp,desc = self.opponentbrief_descriptor_extractor.compute(I_,kp_in)
    #else:
    #    kp,desc = self.brief_descriptor_extractor.compute(I_,kp_in)

    self.keypoints.append(kp_in_opencv)
    self.descriptors.append(desc)
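# --- Illustration only, not part of the original class: a hedged sketch of the
# border-pruning mask built in push_back above. All names and values below are
# invented for this example; it only shows which (x, y) keypoint coordinates
# survive the `features_prune_border` filter.
def _prune_border_example():
    import numpy as np
    h, w, prune = 480, 640, 0.05
    kp = np.array([[5.0, 5.0],       # too close to the top-left corner -> dropped
                   [320.0, 240.0],   # well inside the image -> kept
                   [635.0, 470.0]])  # too close to the bottom-right corner -> dropped
    border_x, border_y = w * prune, h * prune
    ind_valid = np.logical_and(
        np.logical_and(kp[:, 0] >= border_x, kp[:, 0] <= w - border_x),
        np.logical_and(kp[:, 1] >= border_y, kp[:, 1] <= h - border_y))
    return kp[ind_valid, :]  # -> array([[320., 240.]])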