Example #1
    def __getitem__(self, idx):
        while True:
            try:
                (image1, depth1, intrinsics1, pose1, bbox1, image2, depth2,
                 intrinsics2, pose2,
                 bbox2) = self.recover_pair(self.dataset[idx])
                image1 = preprocess_image(image1,
                                          preprocessing=self.preprocessing)
                image2 = preprocess_image(image2,
                                          preprocessing=self.preprocessing)
                assert np.all(image1.shape == image2.shape)
                break
            except IndexError:
                # Ran past the end of the dataset: retry with the previous index
                idx -= 1
            except Exception:
                # The pair could not be recovered: drop this sample and retry
                del self.dataset[idx]

        return {
            'image1': torch.from_numpy(image1.astype(np.float32)),
            'depth1': torch.from_numpy(depth1.astype(np.float32)),
            'intrinsics1': torch.from_numpy(intrinsics1.astype(np.float32)),
            'pose1': torch.from_numpy(pose1.astype(np.float32)),
            'bbox1': torch.from_numpy(bbox1.astype(np.float32)),
            'image2': torch.from_numpy(image2.astype(np.float32)),
            'depth2': torch.from_numpy(depth2.astype(np.float32)),
            'intrinsics2': torch.from_numpy(intrinsics2.astype(np.float32)),
            'pose2': torch.from_numpy(pose2.astype(np.float32)),
            'bbox2': torch.from_numpy(bbox2.astype(np.float32))
        }
Example #2
    def __getitem__(self, idx):
        image1, image2 = self.dataset[idx]

        image1 = preprocess_image(image1, preprocessing=self.preprocessing)
        image2 = preprocess_image(image2, preprocessing=self.preprocessing)
        #print('hi', len(self.dataset))

        return {
            'image1': torch.from_numpy(image1.astype(np.float32)),
            'image2': torch.from_numpy(image2.astype(np.float32)),
        }
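
All of these examples delegate normalization to a shared preprocess_image helper (from the D2-Net lib.utils module). A rough, hedged sketch of what it is assumed to do: transpose HxWxC to CxHxW and apply either Caffe-style (BGR, ImageNet mean subtraction) or Torch-style (scale to [0, 1], ImageNet mean/std) normalization. The exact constants and behaviour should be checked against the actual library.

import numpy as np

def preprocess_image(image, preprocessing=None):
    image = image.astype(np.float32)
    image = np.transpose(image, [2, 0, 1])           # HWC -> CHW
    if preprocessing == 'caffe':
        image = image[::-1, :, :]                    # RGB -> BGR
        mean = np.array([103.939, 116.779, 123.68])  # ImageNet mean (BGR)
        image = image - mean.reshape([3, 1, 1])
    elif preprocessing == 'torch':
        image /= 255.0
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        image = (image - mean.reshape([3, 1, 1])) / std.reshape([3, 1, 1])
    return image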
Example #3
    def __getitem__(self, idx):
        image1, image2, pos1, pos2 = self.dataset[idx]

        image1 = preprocess_image(image1, preprocessing=self.preprocessing)
        image2 = preprocess_image(image2, preprocessing=self.preprocessing)

        return {
            'image1': torch.from_numpy(image1.astype(np.float32)),
            'image2': torch.from_numpy(image2.astype(np.float32)),
            'pos1': torch.from_numpy(pos1.astype(np.float32)),
            'pos2': torch.from_numpy(pos2.astype(np.float32))
        }
Example #4
def extract(image, model, device, multiscale=False, preprocessing='caffe'):
    resized_image = image

    fact_i = image.shape[0] / resized_image.shape[0]
    fact_j = image.shape[1] / resized_image.shape[1]

    input_image = preprocess_image(resized_image, preprocessing=preprocessing)
    with torch.no_grad():
        if multiscale:
            keypoints, scores, descriptors = process_multiscale(
                torch.tensor(input_image[np.newaxis, :, :, :].astype(
                    np.float32),
                             device=device), model)
        else:
            keypoints, scores, descriptors = process_multiscale(torch.tensor(
                input_image[np.newaxis, :, :, :].astype(np.float32),
                device=device),
                                                                model,
                                                                scales=[1])

    keypoints[:, 0] *= fact_i
    keypoints[:, 1] *= fact_j
    keypoints = keypoints[:, [1, 0, 2]]

    feat = {}
    feat['keypoints'] = keypoints
    feat['scores'] = scores
    feat['descriptors'] = descriptors

    return feat
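
A hedged usage sketch for the extract helper above; the D2Net class, its constructor arguments, and the weight-file path follow the upstream d2-net repository layout and are assumptions here, not confirmed by this snippet.

import imageio
import torch
from lib.model_test import D2Net  # assumed import path from the d2-net repository

use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
model = D2Net(model_file='models/d2_tf.pth', use_cuda=use_cuda)  # hypothetical weight path

image = imageio.imread('example.jpg')  # H x W x 3 RGB array
feat = extract(image, model, device, multiscale=False, preprocessing='caffe')
print(feat['keypoints'].shape, feat['descriptors'].shape)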
Example #5
def extract(image, args, model, device):
	if len(image.shape) == 2:
		image = image[:, :, np.newaxis]
		image = np.repeat(image, 3, -1)

	input_image = preprocess_image(
		image,
		preprocessing=args.preprocessing
	)
	with torch.no_grad():
		keypoints, scores, descriptors = process_multiscale(
			torch.tensor(
				input_image[np.newaxis, :, :, :].astype(np.float32),
				device=device
			),
			model,
			scales=[1]
		)

	keypoints = keypoints[:, [1, 0, 2]]

	feat = {}
	feat['keypoints'] = keypoints
	feat['scores'] = scores
	feat['descriptors'] = descriptors

	return feat
Example #6
    def compute_kps_des(self, image):
        with self.lock:
            print('D2Net image shape:', image.shape)
            if len(image.shape) == 2:
                image = image[:, :, np.newaxis]
                image = np.repeat(image, 3, -1)

            # TODO: switch to PIL.Image due to deprecation of scipy.misc.imresize.
            resized_image = image
            if max(resized_image.shape) > self.max_edge:
                resized_image = scipy.misc.imresize(
                    resized_image,
                    self.max_edge / max(resized_image.shape)
                ).astype('float')
            if sum(resized_image.shape[: 2]) > self.max_sum_edges:
                resized_image = scipy.misc.imresize(
                    resized_image,
                    self.max_sum_edges / sum(resized_image.shape[: 2])
                ).astype('float')

            fact_i = image.shape[0] / resized_image.shape[0]
            fact_j = image.shape[1] / resized_image.shape[1]
            print('scale factors: {}, {}'.format(fact_i,fact_j))

            input_image = preprocess_image(
                resized_image,
                preprocessing=self.preprocessing
            )
            with torch.no_grad():
                if self.multiscale:
                    self.pts, scores, descriptors = process_multiscale(
                        torch.tensor(
                            input_image[np.newaxis, :, :, :].astype(np.float32),
                            device=self.device
                        ),
                        self.model
                    )
                else:
                    self.pts, scores, descriptors = process_multiscale(
                        torch.tensor(
                            input_image[np.newaxis, :, :, :].astype(np.float32),
                            device=self.device
                        ),
                        self.model,
                        scales=[1]
                    )

            # Input image coordinates
            self.pts[:, 0] *= fact_i
            self.pts[:, 1] *= fact_j
            # i, j -> u, v
            self.pts = self.pts[:, [1, 0, 2]]
            #print('pts.shape: ', self.pts.shape)
            #print('pts:', self.pts)
            
            self.kps = convert_pts_to_keypoints(self.pts, scores, self.keypoint_size)        
            self.des = descriptors 
            return self.kps, self.des 
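
convert_pts_to_keypoints is not defined in this snippet; a minimal, hypothetical sketch of such a helper, assuming it wraps each (u, v, scale) row and its score into an OpenCV KeyPoint (the argument layout is an assumption, not taken from the library):

import cv2

def convert_pts_to_keypoints(pts, scores, size):
    # pts: N x 3 array of (u, v, scale); scores: N detection responses (assumed layout)
    return [
        cv2.KeyPoint(float(u), float(v), float(size), -1, float(s))
        for (u, v, _), s in zip(pts, scores)
    ]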
Example #7
    def __getitem__(self, idx):
        image1, depth1, intrinsics1, pose1, bbox1, image2, depth2, intrinsics2, pose2, bbox2 = self.recover_pair(self.dataset[idx])
      
        image1 = preprocess_image(image1, preprocessing=self.preprocessing)
        image2 = preprocess_image(image2, preprocessing=self.preprocessing)

        return {
            'image1': torch.from_numpy(image1.astype(np.float32)),
            'depth1': torch.from_numpy(depth1.astype(np.float32)),
            'intrinsics1': torch.from_numpy(intrinsics1.astype(np.float32)),
            'pose1': torch.from_numpy(pose1.astype(np.float32)),
            'bbox1': torch.from_numpy(bbox1.astype(np.float32)),
            'image2': torch.from_numpy(image2.astype(np.float32)),
            'depth2': torch.from_numpy(depth2.astype(np.float32)),
            'intrinsics2': torch.from_numpy(intrinsics2.astype(np.float32)),
            'pose2': torch.from_numpy(pose2.astype(np.float32)),
            'bbox2': torch.from_numpy(bbox2.astype(np.float32))
        }
Example #8
def extract(image, args, model, device):
# def extract(file, args, model, device):
# 	image = imageio.imread(file)
	if len(image.shape) == 2:
		image = image[:, :, np.newaxis]
		image = np.repeat(image, 3, -1)

	resized_image = image
	if max(resized_image.shape) > args.max_edge:
		resized_image = scipy.misc.imresize(
			resized_image,
			args.max_edge / max(resized_image.shape)
		).astype('float')
	if sum(resized_image.shape[: 2]) > args.max_sum_edges:
		resized_image = scipy.misc.imresize(
			resized_image,
			args.max_sum_edges / sum(resized_image.shape[: 2])
		).astype('float')

	fact_i = image.shape[0] / resized_image.shape[0]
	fact_j = image.shape[1] / resized_image.shape[1]

	input_image = preprocess_image(
		resized_image,
		preprocessing=args.preprocessing
	)
	with torch.no_grad():
		if args.multiscale:
			keypoints, scores, descriptors = process_multiscale(
				torch.tensor(
					input_image[np.newaxis, :, :, :].astype(np.float32),
					device=device
				),
				model
			)
		else:
			keypoints, scores, descriptors = process_multiscale(
				torch.tensor(
					input_image[np.newaxis, :, :, :].astype(np.float32),
					device=device
				),
				model,
				scales=[1]
			)

	keypoints[:, 0] *= fact_i
	keypoints[:, 1] *= fact_j
	keypoints = keypoints[:, [1, 0, 2]]

	feat = {}
	feat['keypoints'] = keypoints
	feat['scores'] = scores
	feat['descriptors'] = descriptors

	return feat
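
Several of these examples carry a TODO about scipy.misc.imresize, which was removed in SciPy 1.3. A hedged drop-in sketch using PIL for the float-factor case used above (the helper name imresize here is mine, not from any library):

import numpy as np
from PIL import Image

def imresize(image, scale):
    # Scale an H x W x C image by a single factor, roughly matching the old
    # scipy.misc.imresize(image, float) behaviour (bilinear, uint8 round-trip).
    h, w = image.shape[:2]
    new_size = (max(1, int(round(w * scale))), max(1, int(round(h * scale))))  # PIL expects (W, H)
    resized = Image.fromarray(image.astype(np.uint8)).resize(new_size, Image.BILINEAR)
    return np.asarray(resized)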
Example #9
def cnn_feature_extract(image, scales=[.25, 0.50, 1.0], nfeatures=1000):
    if len(image.shape) == 2:
        image = image[:, :, np.newaxis]
        image = np.repeat(image, 3, -1)

    # TODO: switch to PIL.Image due to deprecation of scipy.misc.imresize.
    resized_image = image
    if max(resized_image.shape) > max_edge:
        resized_image = scipy.misc.imresize(
            resized_image, max_edge / max(resized_image.shape)).astype('float')
    if sum(resized_image.shape[:2]) > max_sum_edges:
        resized_image = scipy.misc.imresize(
            resized_image,
            max_sum_edges / sum(resized_image.shape[:2])).astype('float')

    fact_i = image.shape[0] / resized_image.shape[0]
    fact_j = image.shape[1] / resized_image.shape[1]

    # lib - utils
    input_image = preprocess_image(resized_image, preprocessing="torch")
    with torch.no_grad():
        if multiscale:
            # lib - pyramid.py
            keypoints, scores, descriptors = process_multiscale(
                torch.tensor(input_image[np.newaxis, :, :, :].astype(
                    np.float32),
                             device=device), model, scales)
        else:
            keypoints, scores, descriptors = process_multiscale(
                torch.tensor(input_image[np.newaxis, :, :, :].astype(
                    np.float32),
                             device=device), model, scales)

    # Input image coordinates
    keypoints[:, 0] *= fact_i
    keypoints[:, 1] *= fact_j
    # i, j -> u, v
    keypoints = keypoints[:, [1, 0, 2]]

    if nfeatures != -1:
        # sort by scores
        scores2 = np.array([scores]).T
        res = np.hstack((scores2, keypoints))
        res = res[np.lexsort(-res[:, ::-1].T)]

        res = np.hstack((res, descriptors))
        # keep the top nfeatures entries
        scores = res[0:nfeatures, 0].copy()
        keypoints = res[0:nfeatures, 1:4].copy()
        descriptors = res[0:nfeatures, 4:].copy()
        del res
    return keypoints, scores, descriptors
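
The lexsort/hstack block above keeps the nfeatures highest-scoring keypoints. An equivalent and arguably clearer sketch using np.argsort (ordering of tied scores may differ):

import numpy as np

def top_k(keypoints, scores, descriptors, nfeatures):
    order = np.argsort(-scores)[:nfeatures]  # indices of the largest scores, descending
    return keypoints[order], scores[order], descriptors[order]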
Example #10
def read_and_process_image(img_path, resize=None, H=None, h=None, w=None, preprocessing='caffe'):
	img1 = Image.open(img_path)
	if resize:
		img1 = img1.resize(resize)
	if img1.mode != 'RGB':
		img1 = img1.convert('RGB')
	img1 = np.array(img1)
	if H is not None:
		img1 = cv2.warpPerspective(img1, H, dsize=(400, 400))
		# cv2.imshow("Image", cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
		# cv2.waitKey(0)
	igp1 = torch.from_numpy(preprocess_image(img1, preprocessing=preprocessing).astype(np.float32))
	return igp1, img1
Example #11
    def __getitem__(self, idx):
        while True:
            try:
                img = self.valid_images[idx]

                img1 = Image.open(img)
                img1 = self.imgCrop(img1)
                width, height = img1.size

                H, theta = self.imgRotH(img1, min=0, max=360)

                img1 = np.array(img1)
                img2 = cv2.warpPerspective(img1, H, dsize=(width, height))
                img2 = np.array(img2)

                pos1, pos2 = self.getGrid(img1, img2, H)

                assert (len(pos1) != 0 and len(pos2) != 0)
                break
            except IndexError:
                print("IndexError")
                exit(1)
            except Exception:
                # Rotation or grid generation failed: drop this image and retry
                del self.valid_images[idx]

        img1 = preprocess_image(img1, preprocessing=self.preprocessing)
        img2 = preprocess_image(img2, preprocessing=self.preprocessing)

        return {
            'image1': torch.from_numpy(img1.astype(np.float32)),
            'image2': torch.from_numpy(img2.astype(np.float32)),
            'pos1': torch.from_numpy(pos1.astype(np.float32)),
            'pos2': torch.from_numpy(pos2.astype(np.float32)),
            'H': np.array(H),
            'theta': np.array([theta])
        }
Example #12
    def get_features(self, image_path, keypoints, preprocessing='caffe'):
        keypoints_working_copy = keypoints.copy()
        image = imageio.imread(image_path)
        if len(image.shape) == 2:
            image = image[:, :, np.newaxis]
            image = np.repeat(image, 3, -1)
        resized_image = self.__resize_image__(image)
        fact_i = image.shape[0] / resized_image.shape[0]
        fact_j = image.shape[1] / resized_image.shape[1]

        #print('{}, {}'.format(fact_i, fact_j))

        input_image = preprocess_image(resized_image,
                                       preprocessing=preprocessing)
        with torch.no_grad():
            cur_image = torch.tensor(input_image[np.newaxis, :, :, :].astype(
                np.float32),
                                     device=self.device)
            #print(cur_image.size())
            _, _, h_img, w_img = cur_image.size()
            dense_features = self.model.dense_feature_extraction(cur_image)
            _, _, h, w = dense_features.size()
            #print(dense_features.shape)
            factor_h = float(h_img) / h
            factor_w = float(w_img) / w
            #print('{}, {}'.format(factor_h, factor_w))
            #print(keypoints_working_copy.max(axis=0))
            keypoints_working_copy[:, 0] /= factor_h
            keypoints_working_copy[:, 1] /= factor_w
            keypoints_working_copy = keypoints_working_copy.astype(np.int32)
            #print(keypoints_working_copy.max(axis=0))
            #print(keypoints_working_copy.shape)
            descriptors = dense_features.cpu().numpy()[
                0, :, keypoints_working_copy[:, 0], keypoints_working_copy[:, 1]]
            #print(descriptors.shape)
            """keypoints, scores, descriptors = process_multiscale(
                torch.tensor(
                    input_image[np.newaxis, :, :, :].astype(np.float32),
                    device=self.device
                ),
                self.model,
                scales=[1], 
                preset_keypoints=keypoints
            )"""
        return descriptors
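
The descriptor lookup above rounds each keypoint to the nearest feature-map cell. A hedged sketch of bilinear sampling with torch.nn.functional.grid_sample as an alternative; the function name, tensor names and layout assumptions are mine, not part of the original code:

import torch
import torch.nn.functional as F

def sample_descriptors(dense_features, keypoints_ij, factor_h, factor_w):
    # dense_features: 1 x C x h x w feature map; keypoints_ij: N x 2 float tensor of
    # (row, col) positions in input-image pixels (assumed layout).
    _, c, h, w = dense_features.shape
    # Convert image pixels to feature-map coordinates, then to the [-1, 1] range
    # grid_sample expects, in (x, y) order.
    y = keypoints_ij[:, 0] / factor_h / (h - 1) * 2 - 1
    x = keypoints_ij[:, 1] / factor_w / (w - 1) * 2 - 1
    grid = torch.stack([x, y], dim=-1).view(1, 1, -1, 2)               # 1 x 1 x N x 2
    sampled = F.grid_sample(dense_features, grid, align_corners=True)  # 1 x C x 1 x N
    return sampled.reshape(c, -1).t()                                  # N x C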
Example #13
def extract_features(image, args, device, model):
    # image = cv2.resize(image,(720,576))
    #cv2.imwrite('im2.jpg',image)
    #t1=cv2.getTickCount()
    if len(image.shape) == 2:
        image = image[:, :, np.newaxis]
        image = np.repeat(image, 3, -1)

    # TODO: switch to PIL.Image due to deprecation of scipy.misc.imresize.
    resized_image = image

    if max(resized_image.shape) > args.max_edge:
        ratio = args.max_edge / max(resized_image.shape)
        h, w, ch = resized_image.shape
        resized_image = resize(resized_image, (int(h * ratio), int(w * ratio)))
        # ).astype('float')
    if sum(resized_image.shape[:2]) > args.max_sum_edges:
        ratio = args.max_sum_edges / sum(resized_image.shape[:2])
        h, w, ch = resized_image.shape
        resized_image = resize(resized_image, (int(h * ratio), int(w * ratio)))
        # ).astype('float')
    fact_i = image.shape[0] / resized_image.shape[0]
    fact_j = image.shape[1] / resized_image.shape[1]

    input_image = preprocess_image(resized_image,
                                   preprocessing=args.preprocessing)
    with torch.no_grad():
        if args.multiscale:
            keypoints, scores, descriptors = process_multiscale(
                torch.tensor(input_image[np.newaxis, :, :, :].astype(
                    np.float32),
                             device=device), model)
        else:
            keypoints, scores, descriptors = process_multiscale(torch.tensor(
                input_image[np.newaxis, :, :, :].astype(np.float32),
                device=device),
                                                                model,
                                                                scales=[1])

    # Input image coordinates
    keypoints[:, 0] *= fact_i
    keypoints[:, 1] *= fact_j
    # i, j -> u, v
    keypoints = keypoints[:, [1, 0, 2]]
    return keypoints, scores, descriptors
Example #14
    if max(resized_image.shape) > args.max_edge:
        resized_image = scipy.misc.imresize(
            resized_image,
            args.max_edge / max(resized_image.shape)
        ).astype('float')
    if sum(resized_image.shape[: 2]) > args.max_sum_edges:
        resized_image = scipy.misc.imresize(
            resized_image,
            args.max_sum_edges / sum(resized_image.shape[: 2])
        ).astype('float')

    fact_i = image.shape[0] / resized_image.shape[0]
    fact_j = image.shape[1] / resized_image.shape[1]

    input_image = preprocess_image(
        resized_image,
        preprocessing=args.preprocessing
    )
    with torch.no_grad():
        if args.multiscale:
            keypoints, scores, descriptors = process_multiscale(
                torch.tensor(
                    input_image[np.newaxis, :, :, :].astype(np.float32),
                    device=device
                ),
                model
            )
        else:
            keypoints, scores, descriptors = process_multiscale(
                torch.tensor(
                    input_image[np.newaxis, :, :, :].astype(np.float32),
                    device=device
                ),
                model,
                scales=[1]
            )
Example #15
    def __getitem__(self, idx):
        (image1, depth1, intrinsics1, pose1, bbox1, image2, depth2,
         intrinsics2, pose2, bbox2) = self.recover_pair(self.dataset[idx])

        image1 = preprocess_image(
            image1, preprocessing=self.preprocessing)  # yields a BGR image, no mean-centering
        image2 = preprocess_image(
            image2, preprocessing=self.preprocessing)  # yields a BGR image, no mean-centering
        original_image1 = image1
        original_image2 = image2
        '''
        return {
            'image1': torch.from_numpy(image1.astype(np.float32)),
            'depth1': torch.from_numpy(depth1.astype(np.float32)),
            'intrinsics1': torch.from_numpy(intrinsics1.astype(np.float32)),
            'pose1': torch.from_numpy(pose1.astype(np.float32)),
            'bbox1': torch.from_numpy(bbox1.astype(np.float32)),
            'image2': torch.from_numpy(image2.astype(np.float32)),
            'depth2': torch.from_numpy(depth2.astype(np.float32)),
            'intrinsics2': torch.from_numpy(intrinsics2.astype(np.float32)),
            'pose2': torch.from_numpy(pose2.astype(np.float32)),
            'bbox2': torch.from_numpy(bbox2.astype(np.float32))
        }
        '''

        # Process both images to get keypoints, descriptors and scores
        sift = self.sift
        image1 = np.transpose(image1, (1, 2, 0))
        image1 = Image.fromarray(np.uint8(image1))
        image1 = image1.convert('L')
        image1 = np.array(image1)
        image2 = np.transpose(image2, (1, 2, 0))
        image2 = Image.fromarray(np.uint8(image2))
        image2 = image2.convert('L')
        image2 = np.array(image2)
        kp1, descs1 = sift.detectAndCompute(image1, None)
        kp2, descs2 = sift.detectAndCompute(image2, None)

        # limit the number of keypoints
        kp1_num = min(self.nfeatures, len(kp1))
        kp2_num = min(self.nfeatures, len(kp2))
        kp1 = kp1[:kp1_num]
        kp2 = kp2[:kp2_num]

        kp1_np = np.array([(kp.pt[0], kp.pt[1]) for kp in kp1])
        kp2_np = np.array([(kp.pt[0], kp.pt[1]) for kp in kp2])

        # skip this image pair if fewer than 10 keypoints were detected in either image
        if len(kp1) < 10 or len(kp2) < 10:
            return {
                'keypoints0': torch.zeros([0, 0, 2], dtype=torch.double),
                'keypoints1': torch.zeros([0, 0, 2], dtype=torch.double),
                'descriptors0': torch.zeros([0, 2], dtype=torch.double),
                'descriptors1': torch.zeros([0, 2], dtype=torch.double),
                'image0': image1,
                'image1': image2,
                'file_name': ''
            }

        # confidence of each key point
        scores1_np = np.array([kp.response for kp in kp1])
        scores2_np = np.array([kp.response for kp in kp2])

        kp1_np = kp1_np[:kp1_num, :]
        kp2_np = kp2_np[:kp2_num, :]
        descs1 = descs1[:kp1_num, :]
        descs2 = descs2[:kp2_num, :]

        kp1_np = kp1_np.reshape((1, -1, 2))
        kp2_np = kp2_np.reshape((1, -1, 2))
        descs1 = np.transpose(descs1 / 256.)
        descs2 = np.transpose(descs2 / 256.)

        image1 = torch.from_numpy(image1 / 255.).double()[None].cuda()
        image2 = torch.from_numpy(image2 / 255.).double()[None].cuda()

        # print(image1.shape, image2.shape, depth1.shape, depth2.shape)

        # From the 10-tuple and the keypoints, compute all matches and return them
        # in the format SuperGlue expects:
        # image1, depth1, intrinsics1, pose1, bbox1
        # image2, depth2, intrinsics2, pose2, bbox2
        # depth: (256, 256), intrinsics: (3, 3), pose: (4, 4), bbox: (2)
        # example: all_matches = list(np.array([[0], [0]]))
        try:
            all_matches = self.compute_all_matches(kp1_np, original_image1,
                                                   depth1, intrinsics1, pose1,
                                                   bbox1, kp2_np,
                                                   original_image2, depth2,
                                                   intrinsics2, pose2, bbox2)
        except EmptyTensorError:
            return {
                'keypoints0': torch.zeros([0, 0, 2], dtype=torch.double),
                'keypoints1': torch.zeros([0, 0, 2], dtype=torch.double),
                'descriptors0': torch.zeros([0, 2], dtype=torch.double),
                'descriptors1': torch.zeros([0, 2], dtype=torch.double),
                'image0': image1,
                'image1': image2,
                'file_name': ''
            }

        # print(kp1_np.shape, kp2_np.shape, len(all_matches[0]))

        return {
            'keypoints0': list(kp1_np),
            'keypoints1': list(kp2_np),
            'descriptors0': list(descs1),
            'descriptors1': list(descs2),
            'scores0': list(scores1_np),
            'scores1': list(scores2_np),
            'image0': image1,
            'image1': image2,
            'all_matches': all_matches,
            'file_name': ''
        }

        # return values expected by SuperGlue
        '''
Example #16
    def extract_features(
            self,
            image_list,
            only_path=True,  # only_path: image paths are given rather than already-loaded image arrays
            preprocessing='caffe',
            output_extension='.d2-net',
            output_type='npz',
            multiscale=False,
            store_results=False):

        #print(args)
        if type(image_list) is not list:
            image_list = [image_list]
        # Process the file
        #for image in tqdm(image_list, total=len(image_list)):
        k, d, s = [], [], []
        for image in image_list:
            path = image if only_path else None  # keep the source path for optional result files
            if only_path:
                image = imageio.imread(image)
            if len(image.shape) == 2:
                image = image[:, :, np.newaxis]
                image = np.repeat(image, 3, -1)

            resized_image = self.__resize_image__(image)

            fact_i = image.shape[0] / resized_image.shape[0]
            fact_j = image.shape[1] / resized_image.shape[1]

            input_image = preprocess_image(resized_image,
                                           preprocessing=preprocessing)
            with torch.no_grad():
                if multiscale:
                    keypoints, scores, descriptors = process_multiscale(
                        torch.tensor(input_image[np.newaxis, :, :, :].astype(
                            np.float32),
                                     device=self.device), self.model)
                else:
                    keypoints, scores, descriptors = process_multiscale(
                        torch.tensor(input_image[np.newaxis, :, :, :].astype(
                            np.float32),
                                     device=self.device),
                        self.model,
                        scales=[1])

            # Input image coordinates
            keypoints[:, 0] *= fact_i
            keypoints[:, 1] *= fact_j
            # i, j -> u, v
            keypoints = keypoints[:, [1, 0, 2]]

            if store_results:
                if output_type == 'npz':
                    with open(path + output_extension, 'wb') as output_file:
                        np.savez(output_file,
                                 keypoints=keypoints,
                                 scores=scores,
                                 descriptors=descriptors)
                elif output_type == 'mat':
                    with open(path + output_extension, 'wb') as output_file:
                        scipy.io.savemat(
                            output_file, {
                                'keypoints': keypoints,
                                'scores': scores,
                                'descriptors': descriptors
                            })
                else:
                    raise ValueError('Unknown output type.')
            else:
                k.append(keypoints)
                d.append(descriptors)
                s.append(scores)
        return k, d, s
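
When store_results is set, the files written via np.savez are plain npz archives and can be read back with numpy; a minimal usage sketch (the file name is hypothetical):

import numpy as np

data = np.load('image.jpg.d2-net')
keypoints = data['keypoints']      # N x 3 keypoints (u, v, scale)
scores = data['scores']            # N detection scores
descriptors = data['descriptors']  # N x D descriptors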