def match(self):
    """Match query SIFT features against the current reference image and publish a visualization."""
    input_stamp, input_img = self.img_sub.stamp, self.img_sub.img
    input_features = self.query_features
    reference_img = self.reference_sub.img
    # extract SIFT features from the reference image via the service client
    reference_features = imgsift_client(reference_img)
    matches = self.find_match(input_features.descriptors,
                              reference_features.descriptors)
    rospy.loginfo('matches: {}'.format(len(matches)))
    # prepare output img: draw the matches and overlay the match count
    matched_img = drawMatches(input_img, input_features.positions,
                              reference_img, reference_features.positions,
                              matches)
    cv2.putText(matched_img, 'matches: {}'.format(len(matches)), (5, 25),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
    self.publish_img(stamp=input_stamp, img=matched_img)
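
# NOTE: find_match is defined elsewhere in this class and is not shown in this
# excerpt. The helper below is a minimal standalone sketch of one plausible
# implementation, assuming the descriptors are flat lists that reshape to
# (n_keypoints, 128) float32 SIFT descriptors; it uses OpenCV's brute-force
# matcher with Lowe's ratio test. The name _find_match_sketch and the ratio
# threshold are illustrative assumptions, not this package's actual code.
def _find_match_sketch(query_descriptors, reference_descriptors, ratio=0.75):
    import cv2
    import numpy as np
    query_des = np.array(query_descriptors,
                         dtype=np.float32).reshape((-1, 128))
    reference_des = np.array(reference_descriptors,
                             dtype=np.float32).reshape((-1, 128))
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    # take the two nearest reference descriptors for each query descriptor
    knn_matches = matcher.knnMatch(query_des, reference_des, k=2)
    good_matches = []
    for pair in knn_matches:
        # keep a match only if it is clearly better than the runner-up
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good_matches.append(pair[0])
    return good_matches
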
def extract_sift(obj_name):
    """Extract SIFT data from object images"""
    positions = []
    descriptors = []
    # read training-data options from the parameter server
    data_dir = rospy.get_param('~train_data', None)
    only_appropriate = rospy.get_param('~only_appropriate', True)
    with_mask = rospy.get_param('~with_mask', True)
    train_imgs = get_train_imgs(obj_name=obj_name, data_dir=data_dir,
                                only_appropriate=only_appropriate,
                                with_mask=with_mask)
    for train_img in train_imgs:
        # extract SIFT keypoints and descriptors for each training image
        train_features = imgsift_client(train_img)
        train_pos = np.array(train_features.positions)
        train_des = np.array(train_features.descriptors)
        positions.append(train_pos)
        descriptors.append(train_des)
    if len(positions) == 0 or len(descriptors) == 0:
        rospy.logerr('no images found: {0}'.format(obj_name))
        return
    positions, descriptors = map(np.array, [positions, descriptors])
    siftdata = dict(positions=positions, descriptors=descriptors)
    # save sift data
    save_siftdata(obj_name, siftdata)
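
# A minimal usage sketch: run extract_sift for a single object from a ROS node
# entry point. The node name and the '~object' parameter are illustrative
# assumptions; the actual launch interface of this package may differ.
if __name__ == '__main__':
    import rospy  # repeated here only so the sketch reads standalone
    rospy.init_node('extract_sift')
    target_obj = rospy.get_param('~object')
    extract_sift(obj_name=target_obj)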