Example #1
import numpy as np
import cv2 as cv
# SuperPointFrontend comes from the SuperPoint demo code; the exact import path is an
# assumption for this excerpt.
from demo_superpoint import SuperPointFrontend


def classical_detector(im, **config):
    if config['model'] == 'harris':
        im = np.uint8(im * 255)
        detections = cv.cornerHarris(im, 4, 3, 0.04)

    elif config['model'] == 'sift':
        im = np.uint8(im * 255)
        sift = cv.xfeatures2d.SIFT_create()
        keypoints, _ = sift.detectAndCompute(im, None)
        responses = np.array([k.response for k in keypoints])
        keypoints = np.array([k.pt for k in keypoints]).astype(int)
        detections = np.zeros(im.shape[:2], float)
        detections[keypoints[:, 1], keypoints[:, 0]] = responses

    elif config['model'] == 'fast':
        im = np.uint8(im * 255)
        detector = cv.FastFeatureDetector_create(15)
        corners = detector.detect(im.astype(np.uint8))
        detections = np.zeros(im.shape[:2], float)
        for c in corners:
            detections[tuple(np.flip(np.intp(c.pt), 0))] = c.response

    elif config['model'] == 'pretrained_magic_point':
        weights_path = '/home/ubuntu/data/superpoint_v1.pth'
        fe = SuperPointFrontend(weights_path=weights_path,
                                nms_dist=config['nms'],
                                conf_thresh=0.015,
                                nn_thresh=0.7,
                                cuda=True)
        points, desc, detections = fe.run(im[:, :, 0])

    return detections.astype(np.float32)
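

# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of calling classical_detector above. The image path and
# config values are illustrative assumptions; the function expects a single-channel
# image scaled to [0, 1] and returns a dense per-pixel response map.
im_demo = cv.imread('example.png', cv.IMREAD_GRAYSCALE).astype(np.float32) / 255.
demo_detections = classical_detector(im_demo, model='harris', nms=4)
ys, xs = np.where(demo_detections > 0.01 * demo_detections.max())  # keep strong responses
print('kept %d corner candidates' % len(ys))

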
def classical_detector_descriptor(im, **config):
    if config['model'] == 'sift':
        im = np.uint8(im * 255)
        sift = cv.xfeatures2d.SIFT_create(nfeatures=1500)
        keypoints, desc = sift.detectAndCompute(im, None)
        responses = np.array([k.response for k in keypoints])
        keypoints = np.array([k.pt for k in keypoints]).astype(int)
        desc = np.array(desc)

        detections = np.zeros(im.shape[:2], float)
        detections[keypoints[:, 1], keypoints[:, 0]] = responses
        descriptors = np.zeros((im.shape[0], im.shape[1], 128), float)
        descriptors[keypoints[:, 1], keypoints[:, 0]] = desc

    elif config['model'] == 'orb':
        im = np.uint8(im * 255)
        orb = cv.ORB_create(nfeatures=1500)
        keypoints, desc = orb.detectAndCompute(im, None)
        responses = np.array([k.response for k in keypoints])
        keypoints = np.array([k.pt for k in keypoints]).astype(int)
        desc = np.array(desc)

        detections = np.zeros(im.shape[:2], float)
        detections[keypoints[:, 1], keypoints[:, 0]] = responses
        descriptors = np.zeros((im.shape[0], im.shape[1], 32), float)
        descriptors[keypoints[:, 1], keypoints[:, 0]] = desc

    elif config['model'] == 'fastfreak':
        im = np.uint8(im * 255)
        fast = cv.FastFeatureDetector_create(15)
        freak = cv.xfeatures2d.FREAK_create()
        keypoints = fast.detect(im)
        keypoints, desc = freak.compute(im, keypoints)
        responses = np.array([k.response for k in keypoints])
        keypoints = np.array([k.pt for k in keypoints]).astype(int)
        desc = np.array(desc)

        detections = np.zeros(im.shape[:2], float)
        detections[keypoints[:, 1], keypoints[:, 0]] = responses
        descriptors = np.zeros((im.shape[0], im.shape[1], 64), float)
        descriptors[keypoints[:, 1], keypoints[:, 0]] = desc

    elif config['model'] == 'pretrained_super_point':
        weights_path = '/home/ubuntu/data/superpoint_v1.pth'
        fe = SuperPointFrontend(weights_path=weights_path,
                                nms_dist=config['nms'],
                                conf_thresh=0.015,
                                nn_thresh=0.7,
                                cuda=True)
        points, desc, detections = fe.run(im[:, :, 0])
        points = points.astype(int)
        descriptors = np.zeros((im.shape[0], im.shape[1], 256), float)
        descriptors[points[1, :], points[0, :]] = np.transpose(desc)

    detections = detections.astype(np.float32)
    descriptors = descriptors.astype(np.float32)
    return (detections, descriptors)
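

# --- Usage sketch (not part of the original file) ---
# classical_detector_descriptor returns dense maps: an H x W response map and an
# H x W x D descriptor volume. Sparse keypoints and their descriptors can be read
# back from the non-zero responses; the image and config values below are
# illustrative assumptions.
im_demo = cv.imread('example.png', cv.IMREAD_GRAYSCALE).astype(np.float32) / 255.
demo_det, demo_desc = classical_detector_descriptor(im_demo, model='orb', nms=4)
ys, xs = np.nonzero(demo_det)
orb_descriptors = demo_desc[ys, xs]  # one 32-dimensional ORB descriptor per keypoint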
Example #3
class SuperPointFeature2D:
    def __init__(self, do_cuda=True):
        self.lock = RLock()
        self.opts = SuperPointOptions(do_cuda)
        print(self.opts)

        print('SuperPointFeature2D')
        print('==> Loading pre-trained network.')
        # This class runs the SuperPoint network and processes its outputs.
        self.fe = SuperPointFrontend(weights_path=self.opts.weights_path,
                                     nms_dist=self.opts.nms_dist,
                                     conf_thresh=self.opts.conf_thresh,
                                     nn_thresh=self.opts.nn_thresh,
                                     cuda=self.opts.cuda)
        print('==> Successfully loaded pre-trained network.')

        self.pts = []
        self.kps = []
        self.des = []
        self.heatmap = []
        self.frame = None
        self.frameFloat = None
        self.keypoint_size = 20  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint

    # compute both keypoints and descriptors
    def detectAndCompute(self, frame, mask=None):  # mask is ignored, kept for OpenCV API compatibility
        with self.lock:
            self.frame = frame
            self.frameFloat = (frame.astype('float32') / 255.)
            self.pts, self.des, self.heatmap = self.fe.run(self.frameFloat)
            # N.B.: pts are - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
            #print('pts: ', self.pts.T)
            self.kps = convert_superpts_to_keypoints(self.pts.T,
                                                     size=self.keypoint_size)
            if kVerbose:
                print('detector: SUPERPOINT, #features: ', len(self.kps),
                      ', frame res: ', frame.shape[0:2])
            return self.kps, transpose_des(self.des)

    # detect keypoints (currently always recomputes them via detectAndCompute())
    def detect(self, frame, mask=None):  # mask is ignored, kept for OpenCV API compatibility
        with self.lock:
            #if self.frame is not frame:
            self.detectAndCompute(frame)
            return self.kps

    # return descriptors for the given frame; recompute keypoints and descriptors
    # if the frame is not the one that was last processed
    def compute(self,
                frame,
                kps=None,
                mask=None):  # kps and mask are ignored, kept for OpenCV API compatibility
        with self.lock:
            if self.frame is not frame:
                Printer.orange(
                    'WARNING: SUPERPOINT is recomputing both kps and des on last input frame',
                    frame.shape)
                self.detectAndCompute(frame)
            return self.kps, transpose_des(self.des)
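

# --- Usage sketch (not part of the original file) ---
# SuperPointFeature2D mimics the cv2.Feature2D detectAndCompute() interface, so it can
# be swapped in where an OpenCV detector/descriptor is expected. The image path is a
# placeholder; the frame should be a single-channel uint8 image, since it is divided
# by 255 before being handed to SuperPointFrontend.run().
import cv2

frame = cv2.imread('example.png', cv2.IMREAD_GRAYSCALE)
feature = SuperPointFeature2D(do_cuda=False)   # do_cuda=False assumed to select the CPU path
kps, des = feature.detectAndCompute(frame)     # cv2.KeyPoint list and per-keypoint descriptors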
Example #4
def classical_detector_descriptor(im, **config):
    if config['method'] == 'sift':
        im = np.uint8(im * 255)
        sift = cv2.xfeatures2d.SIFT_create(nfeatures=1500)
        keypoints, desc = sift.detectAndCompute(im, None)
        responses = np.array([k.response for k in keypoints])
        keypoints = np.array([k.pt for k in keypoints]).astype(int)
        desc = np.array(desc)

        detections = np.zeros(im.shape[:2], float)
        detections[keypoints[:, 1], keypoints[:, 0]] = responses
        descriptors = np.zeros((im.shape[0], im.shape[1], 128), float)
        descriptors[keypoints[:, 1], keypoints[:, 0]] = desc

    elif config['method'] == 'orb':
        im = np.uint8(im * 255)
        orb = cv2.ORB_create(nfeatures=1500)
        keypoints, desc = orb.detectAndCompute(im, None)
        responses = np.array([k.response for k in keypoints])
        keypoints = np.array([k.pt for k in keypoints]).astype(int)
        desc = np.array(desc)

        detections = np.zeros(im.shape[:2], float)
        detections[keypoints[:, 1], keypoints[:, 0]] = responses
        descriptors = np.zeros((im.shape[0], im.shape[1], 32), float)
        descriptors[keypoints[:, 1], keypoints[:, 0]] = desc

    elif config['method'] == 'pretrained_magic_point':
        weights_path = '/cluster/home/pautratr/3d_project/SuperPointPretrainedNetwork/superpoint_v1.pth'
        fe = SuperPointFrontend(weights_path=weights_path,
                                nms_dist=config['nms'],
                                conf_thresh=0.015,
                                nn_thresh=0.7,
                                cuda=False)
        points, desc, detections = fe.run(im[:, :, 0])
        points = points.astype(int)
        descriptors = np.zeros((im.shape[0], im.shape[1], 256), float)
        descriptors[points[1, :], points[0, :]] = np.transpose(desc)

    detections = detections.astype(np.float32)
    descriptors = descriptors.astype(np.float32)
    return (detections, descriptors)
Example #5
    def __init__(self, do_cuda=True):
        self.lock = RLock()
        self.opts = SuperPointOptions(do_cuda)
        print(self.opts)

        print('SuperPointFeature2D')
        print('==> Loading pre-trained network.')
        # This class runs the SuperPoint network and processes its outputs.
        self.fe = SuperPointFrontend(weights_path=self.opts.weights_path,
                                     nms_dist=self.opts.nms_dist,
                                     conf_thresh=self.opts.conf_thresh,
                                     nn_thresh=self.opts.nn_thresh,
                                     cuda=self.opts.cuda)
        print('==> Successfully loaded pre-trained network.')

        self.pts = []
        self.kps = []
        self.des = []
        self.heatmap = []
        self.frame = None
        self.frameFloat = None
        self.keypoint_size = 20  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint
def init(cuda):
    #print("SuperPoint python init()")

    global device
    device = 'cuda' if torch.cuda.is_available() and cuda else 'cpu'

    # This class runs the SuperPoint network and processes its outputs.
    global superpoint
    superpoint = SuperPointFrontend(weights_path="superpoint_v1.pth",
                                    nms_dist=4,
                                    conf_thresh=0.015,
                                    nn_thresh=1,
                                    cuda=cuda)
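

# --- Usage sketch (not part of the original file) ---
# After init(), the module-level 'superpoint' frontend can be run directly on a
# grayscale image normalized to [0, 1]. As noted in the class above, run() returns a
# 3xN point array [x_i, y_i, confidence_i], a 256xN descriptor matrix, and a dense
# heatmap. The image path is a placeholder.
import cv2
import numpy as np

init(cuda=True)
img = cv2.imread('example.png', cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
pts, desc, heatmap = superpoint.run(img)   # 3xN points, 256xN descriptors, dense heatmap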
Example #7
def classical_detector(im, **config):
    if config['method'] == 'harris':
        im = np.uint8(im * 255)
        detections = cv2.cornerHarris(im, 4, 3, 0.04)

    elif config['method'] == 'shi':
        im = np.uint8(im * 255)
        detections = np.zeros(im.shape[:2], float)
        # Sweep quality thresholds; each corner ends up labelled with the highest
        # threshold at which it is still returned by goodFeaturesToTrack.
        thresh = np.linspace(0.0001, 1, 600, endpoint=False)
        for t in thresh:
            corners = cv2.goodFeaturesToTrack(im, 600, t, 5)
            if corners is not None:
                corners = corners.astype(int)
                detections[(corners[:, 0, 1], corners[:, 0, 0])] = t

    elif config['method'] == 'fast':
        im = np.uint8(im * 255)
        detector = cv2.FastFeatureDetector_create(10)
        corners = detector.detect(im.astype(np.uint8))
        detections = np.zeros(im.shape[:2], float)
        for c in corners:
            detections[tuple(np.flip(np.intp(c.pt), 0))] = c.response

    elif config['method'] == 'random':
        detections = np.random.rand(im.shape[0], im.shape[1])

    elif config['method'] == 'pretrained_magic_point':
        weights_path = '/cluster/home/pautratr/3d_project/SuperPointPretrainedNetwork/superpoint_v1.pth'
        fe = SuperPointFrontend(weights_path=weights_path,
                                nms_dist=config['nms'],
                                conf_thresh=0.015,
                                nn_thresh=0.7,
                                cuda=True)
        points, desc, detections = fe.run(im[:, :, 0])

    return detections.astype(np.float32)
Example #8
        default='parse_outputs/',
        help='Directory where to write output frames (default: parse_outputs/).')
    opt = parser.parse_args()
    print(opt)

    # This class helps load input images from different sources.
    vs = VideoStreamer(opt.input, opt.camid, None, None, opt.skip,
                       opt.img_glob)
    assert not vs.video_file

    print('==> Loading pre-trained network.')
    # This class runs the SuperPoint network and processes its outputs.
    fe = SuperPointFrontend(weights_path=opt.weights_path,
                            nms_dist=opt.nms_dist,
                            conf_thresh=opt.conf_thresh,
                            nn_thresh=opt.nn_thresh,
                            cuda=opt.cuda)
    print('==> Successfully loaded pre-trained network.')

    # Font parameters for visualization.
    font = cv2.FONT_HERSHEY_DUPLEX
    font_clr = (255, 255, 255)
    font_pt = (4, 12)
    font_sc = 0.4

    # Create output directory.
    print('==> Will write outputs to %s' % opt.write_dir)
    if not os.path.exists(opt.write_dir):
        os.makedirs(opt.write_dir)
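
    # --- Sketch of the processing loop that typically follows this setup (an
    # assumption based on the SuperPoint demo script; VideoStreamer.next_frame()
    # returning an (image, status) pair is not shown in the excerpt above) ---
    while True:
        img, status = vs.next_frame()         # float32 grayscale frame scaled to [0, 1]
        if status is False:
            break
        pts, desc, heatmap = fe.run(img)      # 3xN points, 256xN descriptors, dense heatmap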