Example #1
    def test_crop_tensor(self):
        dataset = OTB(self.otb_dir, download=True)

        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([255])
        print('[PyTorch-crop] padding:', padding, 'out_size:', out_size)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            # OpenCV reads color images as BGR; convert to RGB for display
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # HWC uint8 array -> 1 x C x H x W float tensor
            image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).float()
            # annotation is (x, y, w, h); compute the target center
            bndbox = torch.from_numpy(anno[f, :]).float()
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_tensor(image,
                                center,
                                bndbox[2:],
                                padding=padding,
                                out_size=out_size)
            patch = patch.squeeze().permute(1, 2,
                                            0).cpu().numpy().astype(np.uint8)
            show_frame(patch, fig_n=1, pause=0.1)
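A minimal standalone sketch (not part of the original test) of how crop_tensor might be exercised on a synthetic image, assuming the call signature used above and that an out_size of 255 yields a 1 x 3 x 255 x 255 tensor; the import path is hypothetical.

import torch

from siamfc.ops import crop_tensor  # hypothetical import path

image = torch.rand(1, 3, 480, 640) * 255        # 1 x C x H x W float tensor
bndbox = torch.tensor([100., 150., 64., 48.])   # (x, y, w, h)
center = bndbox[:2] + bndbox[2:] / 2            # target center, as in the test above

patch = crop_tensor(image, center, bndbox[2:], padding='avg', out_size=255)
print(patch.shape)  # expected: 1 x 3 x 255 x 255 under the assumption above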
Example #2
    def test_pairwise(self):
        base_dataset = OTB(self.otb_dir, download=True)
        frame_range = random.choice([0, 1, 100])
        causal = random.choice([True, False])
        subset = random.choice(['train', 'val'])
        return_index = random.choice([True, False])
        rand_choice = random.choice([True, False])
        dataset = Pairwise(
            base_dataset, pairs_per_video=1, frame_range=frame_range,
            causal=causal, subset=subset, return_index=return_index,
            rand_choice=rand_choice)
        self.assertGreater(len(dataset), 0)

        for i, item in enumerate(dataset):
            img_z, img_x, bndbox_z, bndbox_x = \
                item[0], item[1], item[2], item[3]
            if return_index:
                print('rand_z:', item[4], '\trand_x:', item[5])
            self.assertEqual(img_z.mode, 'RGB')
            self.assertEqual(img_x.mode, 'RGB')
            self.assertEqual(bndbox_z.shape, (4,))
            self.assertEqual(bndbox_x.shape, (4,))

        if self.visualize:
            item = random.choice(dataset)
            img_z, img_x, bndbox_z, bndbox_x = \
                item[0], item[1], item[2], item[3]
            if return_index:
                print('rand_z:', item[4], '\trand_x:', item[5])
            show_frame(img_z, bndbox_z, fig_n=1, pause=1)
            show_frame(img_x, bndbox_x, fig_n=2, pause=1)
Example #3
    def test_transform_goturn(self):
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        transform = TransformGOTURN()
        dataset = Pairwise(base_dataset,
                           transform,
                           pairs_per_video=1,
                           frame_range=1,
                           causal=True)
        self.assertGreater(len(dataset), 0)

        for crop_z, crop_x, labels in dataset:
            self.assertEqual(crop_z.size(), crop_x.size())

        if self.visualize:
            for t in range(10):
                crop_z, crop_x, labels = random.choice(dataset)
                mean_color = torch.tensor(transform.mean_color).float().view(
                    3, 1, 1)
                crop_z = F.to_pil_image((crop_z + mean_color) / 255.0)
                crop_x = F.to_pil_image((crop_x + mean_color) / 255.0)
                labels = labels.cpu().numpy()
                labels *= transform.out_size / transform.label_scale_factor

                bndbox = np.concatenate([labels[:2], labels[2:] - labels[:2]])
                show_frame(crop_x, bndbox, fig_n=1, pause=1)
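As a minimal sketch (not part of the original tests), a Pairwise dataset with a tensor-producing transform such as TransformGOTURN can presumably be fed to a standard torch DataLoader, since the tests index it like a dataset; the dataset root is a placeholder, and fixed-size crops are assumed so that samples batch cleanly.

from torch.utils.data import DataLoader

# OTB/VOT, Pairwise and TransformGOTURN are imported as in the tests above
base_dataset = VOT('data/vot', return_rect=True, download=True)  # placeholder root
dataset = Pairwise(base_dataset, TransformGOTURN(),
                   pairs_per_video=1, frame_range=1, causal=True)
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)

for crop_z, crop_x, labels in loader:
    # each element is batched along dim 0, e.g. crop_z of shape [8, 3, H, W]
    break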
Example #4
    def test_pad_pil(self):
        dataset = OTB(self.otb_dir, download=True)

        npad = random.choice([0, 10, 50])
        padding = random.choice([None, 0, 'avg'])
        print('[PIL-pad] padding:', padding, 'npad:', npad)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = Image.open(img_file)
            image = pad_pil(image, npad, padding=padding)
            show_frame(image, fig_n=1)
Example #5
    def test_load(self):
        dataset = VOT(self.vot_dir, return_rect=True)
        self.assertGreater(len(dataset), 0)

        for img_files, anno in dataset:
            self.assertGreater(len(img_files), 0)
            self.assertEqual(len(img_files), len(anno))

        if self.visualize:
            img_files, anno = random.choice(dataset)
            for f, img_file in enumerate(img_files):
                image = Image.open(img_file)
                show_frame(image, anno[f, :])
Example #6
    def test_transform_siamfc(self):
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        transform = TransformSiamFC(stats_path=self.stats_path)
        dataset = Pairwise(base_dataset, transform=transform, subset='train')
        self.assertGreater(len(dataset), 0)

        for crop_z, crop_x, labels, weights in dataset:
            self.assertAlmostEqual(weights[labels == 1].sum().item(),
                                   weights[labels == 0].sum().item())
            self.assertAlmostEqual(weights.sum().item(),
                                   labels[labels >= 0].numel())
            self.assertEqual(
                weights[labels == transform.ignore_label].sum().item(), 0)

        if self.visualize:
            crop_z, crop_x, labels, weights = random.choice(dataset)
            crop_z = F.to_pil_image(crop_z / 255.0)
            crop_x = F.to_pil_image(crop_x / 255.0)
            labels = self._rescale(labels.cpu().squeeze().numpy())
            weights = self._rescale(weights.cpu().squeeze().numpy())

            bndbox_z = np.array([31, 31, 64, 64])
            bndbox_x = np.array([95, 95, 64, 64])

            show_frame(crop_z, bndbox_z, fig_n=1, pause=1)
            show_frame(crop_x, bndbox_x, fig_n=2, pause=1)
            show_frame(labels, fig_n=3, pause=1, cmap='hot')
            show_frame(weights, fig_n=4, pause=5, cmap='hot')
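The three assertions above pin down how the SiamFC training weights behave: positive and negative positions carry equal total weight, the weights sum to the number of non-ignored labels, and ignored positions receive zero weight. Below is a small illustration (not the library's implementation) of one weighting scheme that satisfies all three; the -1 value in the toy labels is only a stand-in for the actual ignore label.

import torch

def balanced_weights(labels):
    # positives and negatives each receive half of the total weight;
    # anything else (e.g. a negative ignore label) receives zero
    pos = labels == 1
    neg = labels == 0
    weights = torch.zeros_like(labels, dtype=torch.float)
    n_valid = (pos | neg).sum().item()
    weights[pos] = 0.5 * n_valid / pos.sum()
    weights[neg] = 0.5 * n_valid / neg.sum()
    return weights

labels = torch.tensor([[1., 0., 0.], [0., -1., 1.]])  # -1 stands in for the ignore label
w = balanced_weights(labels)
print(w[labels == 1].sum(), w[labels == 0].sum(), w.sum())  # 2.5, 2.5, 5.0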
Example #7
    def test_imagenet_obj(self):
        subset = random.choice(['train', 'val'])
        dataset = ImageNetObject(self.obj_dir, subset=subset, return_rect=True)
        self.assertGreater(len(dataset), 0)

        for i in range(10):
            img_file, bndbox = dataset[i]
            self.assertTrue(os.path.isfile(img_file))
            self.assertTrue(len(bndbox) == 4)

            if self.visualize:
                img_file, bndbox = random.choice(dataset)
                image = Image.open(img_file)
                show_frame(image, bndbox, fig_n=1, pause=0.1)
Example #8
    def test_imagenet_vid(self):
        dataset = ImageNetVID(self.vid_dir, return_rect=True)
        self.assertGreater(len(dataset), 0)

        for i in range(10):
            img_files, anno = random.choice(dataset)
            self.assertGreater(len(img_files), 0)
            self.assertEqual(len(img_files), len(anno))

        if self.visualize:
            img_files, anno = random.choice(dataset)
            for f, img_file in enumerate(img_files):
                image = Image.open(img_file)
                show_frame(image, anno[f, :])
Example #9
    def test_pad_array(self):
        dataset = OTB(self.otb_dir, download=True)

        npad = random.choice([0, 10, 50])
        padding = random.choice([None, 0, 'avg'])
        print('[cv2-pad] padding:', padding, 'npad:', npad)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = pad_array(image, npad, padding=padding)
            show_frame(image[:, :, ::-1], fig_n=1)
Example #10
    def test_crop_pil(self):
        dataset = OTB(self.otb_dir, download=True)

        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([None, 255])
        print('[PIL-crop] padding:', padding, 'out_size:', out_size)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = Image.open(img_file)
            bndbox = anno[f, :]
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_pil(image,
                             center,
                             bndbox[2:],
                             padding=padding,
                             out_size=out_size)
            show_frame(patch, fig_n=2, pause=0.1)
Example #11
    def test_resize_tensor(self):
        dataset = OTB(self.otb_dir, download=True)

        out_size = random.choice([30, 100, 255])
        print('[PyTorch-resize]:', out_size)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = torch.from_numpy(image).permute(2, 0,
                                                    1).unsqueeze(0).float()
            image = resize_tensor(image, out_size)
            image = image.squeeze().permute(1, 2, 0).numpy().astype(np.uint8)
            show_frame(image, fig_n=2, pause=0.1)
Example #12
    def test_crop_array(self):
        dataset = OTB(self.otb_dir, download=True)

        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([None, 255])
        print('[cv2-crop] padding:', padding, 'out_size:', out_size)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            bndbox = anno[f, :]
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_array(image,
                               center,
                               bndbox[2:],
                               padding=padding,
                               out_size=out_size)
            show_frame(patch, fig_n=2, pause=0.1)