    def test_transform_siamfc(self):
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        transform = TransformSiamFC(stats_path=self.stats_path)
        dataset = Pairwise(base_dataset, transform=transform, subset='train')
        self.assertGreater(len(dataset), 0)

        for crop_z, crop_x, labels, weights in dataset:
            # positive and negative locations carry equal total weight
            self.assertAlmostEqual(weights[labels == 1].sum().item(),
                                   weights[labels == 0].sum().item())
            # total weight equals the count of non-negative labels
            self.assertAlmostEqual(weights.sum().item(),
                                   labels[labels >= 0].numel())
            # ignored locations receive zero weight
            self.assertEqual(
                weights[labels == transform.ignore_label].sum().item(), 0)

        if self.visualize:
            crop_z, crop_x, labels, weights = random.choice(dataset)
            crop_z = F.to_pil_image(crop_z / 255.0)
            crop_x = F.to_pil_image(crop_x / 255.0)
            labels = self._rescale(labels.cpu().squeeze().numpy())
            weights = self._rescale(weights.cpu().squeeze().numpy())

            # fixed boxes roughly centered in the exemplar and search crops,
            # used only for visualization
            bndbox_z = np.array([31, 31, 64, 64])
            bndbox_x = np.array([95, 95, 64, 64])

            show_frame(crop_z, bndbox_z, fig_n=1, pause=1)
            show_frame(crop_x, bndbox_x, fig_n=2, pause=1)
            show_frame(labels, fig_n=3, pause=1, cmap='hot')
            show_frame(weights, fig_n=4, pause=5, cmap='hot')
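
The three assertions above pin down the class-balanced weighting used with the SiamFC logistic loss: positive and negative locations carry equal total weight, the total weight equals the number of non-ignored locations, and ignored locations carry none. Below is a minimal sketch of a weight map that satisfies those invariants; it is illustrative only, not the repository's actual TransformSiamFC code, and it assumes the ignore label is negative so that labels >= 0 selects exactly the valid locations.

import torch


def balanced_weights(labels):
    """Weight map in which positives and negatives carry equal total weight."""
    weights = torch.zeros_like(labels, dtype=torch.float32)
    pos = labels == 1
    neg = labels == 0
    n_pos = pos.sum().item()
    n_neg = neg.sum().item()
    n_valid = n_pos + n_neg
    if n_pos > 0:
        weights[pos] = 0.5 * n_valid / n_pos
    if n_neg > 0:
        weights[neg] = 0.5 * n_valid / n_neg
    # any other label value (e.g. the transform's ignore_label) keeps weight 0
    return weights
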
Example #2
    def test_pairwise(self):
        base_dataset = OTB(self.otb_dir, download=True)
        frame_range = random.choice([0, 1, 100])
        causal = random.choice([True, False])
        subset = random.choice(['train', 'val'])
        return_index = random.choice([True, False])
        rand_choice = random.choice([True, False])
        dataset = Pairwise(
            base_dataset, pairs_per_video=1, frame_range=frame_range,
            causal=causal, subset=subset, return_index=return_index,
            rand_choice=rand_choice)
        self.assertGreater(len(dataset), 0)

        for item in dataset:
            img_z, img_x, bndbox_z, bndbox_x = item[:4]
            if return_index:
                print('rand_z:', item[4], '\trand_x:', item[5])
            self.assertEqual(img_z.mode, 'RGB')
            self.assertEqual(img_x.mode, 'RGB')
            self.assertEqual(bndbox_z.shape, (4,))
            self.assertEqual(bndbox_x.shape, (4,))

        if self.visualize:
            item = random.choice(dataset)
            img_z, img_x, bndbox_z, bndbox_x = item[:4]
            if return_index:
                print('rand_z:', item[4], '\trand_x:', item[5])
            show_frame(img_z, bndbox_z, fig_n=1, pause=1)
            show_frame(img_x, bndbox_x, fig_n=2, pause=1)
Example #3
    def test_transform_goturn(self):
        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        transform = TransformGOTURN()
        dataset = Pairwise(base_dataset,
                           transform,
                           pairs_per_video=1,
                           frame_range=1,
                           causal=True)
        self.assertGreater(len(dataset), 0)

        for crop_z, crop_x, labels in dataset:
            self.assertEqual(crop_z.size(), crop_x.size())

        if self.visualize:
            for t in range(10):
                crop_z, crop_x, labels = random.choice(dataset)
                # undo the mean-color subtraction applied by the transform
                # before converting the crops back to images for display
                mean_color = torch.tensor(transform.mean_color).float().view(
                    3, 1, 1)
                crop_z = F.to_pil_image((crop_z + mean_color) / 255.0)
                crop_x = F.to_pil_image((crop_x + mean_color) / 255.0)
                # map the regressed corner labels back to pixel coordinates
                labels = labels.cpu().numpy()
                labels *= transform.out_size / transform.label_scale_factor

                bndbox = np.concatenate([labels[:2], labels[2:] - labels[:2]])
                show_frame(crop_x, bndbox, fig_n=1, pause=1)
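
The lines just before show_frame treat the GOTURN labels as [x1, y1, x2, y2] corner coordinates that appear to be normalized by out_size and scaled by label_scale_factor: they are mapped back to pixels and then converted to an [x, y, w, h] rectangle. That last step could be factored into a small helper like the one below (purely illustrative, not part of the original code).

import numpy as np


def corners_to_rect(corners):
    """Convert an [x1, y1, x2, y2] corner box to an [x, y, w, h] rectangle."""
    x1, y1, x2, y2 = corners
    return np.array([x1, y1, x2 - x1, y2 - y1])


# equivalent to the np.concatenate expression used above:
# bndbox = corners_to_rect(labels)
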
Example #4
    def test_goturn_train(self):
        tracker = TrackerGOTURN(net_path=self.net_path)
        transform = TransformGOTURN()

        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        dataset = Pairwise(
            base_dataset, transform, frame_range=1, causal=True)
        dataloader = DataLoader(dataset, batch_size=2, shuffle=True)

        # training loop
        for it, batch in enumerate(dataloader):
            update_lr = it == 0
            loss = tracker.step(batch, backward=True, update_lr=update_lr)
            print('Iter: {} Loss: {:.6f}'.format(it + 1, loss))

        # validation loop
        for it, batch in enumerate(dataloader):
            loss = tracker.step(batch, backward=False)
            print('Val. Iter: {} Loss: {:.6f}'.format(it + 1, loss))
Example #5
    def test_siamfc_train_v2(self):
        tracker = TrackerSiamFC(branch='alexv2')
        transform = TransformSiamFC(
            stats_path=self.stats_path, score_sz=33,
            r_pos=8, total_stride=4)

        base_dataset = VOT(self.vot_dir, return_rect=True, download=True)
        dataset = Pairwise(base_dataset, transform, pairs_per_video=1)
        dataloader = DataLoader(dataset, batch_size=2, shuffle=True)

        # training loop
        for it, batch in enumerate(dataloader):
            update_lr = it == 0
            loss = tracker.step(batch, backward=True, update_lr=update_lr)
            print('Iter: {} Loss: {:.6f}'.format(it + 1, loss))

        # validation loop
        for it, batch in enumerate(dataloader):
            loss = tracker.step(batch, backward=False)
            print('Val. Iter: {} Loss: {:.6f}'.format(it + 1, loss))
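
All of the test methods above rely on a unittest.TestCase scaffold (dataset and checkpoint paths, a visualize flag, and the _rescale helper used for display), possibly spread over more than one test class, that is not shown in this listing. The sketch below is one plausible shape for that scaffold; every path, the _rescale body, and the omitted project-specific imports are assumptions rather than values taken from the original code.

# standard imports used by the test methods above
import random
import unittest

import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import functional as F

# The project-specific imports (VOT, OTB, Pairwise, TransformSiamFC,
# TransformGOTURN, TrackerSiamFC, TrackerGOTURN, show_frame) are omitted
# because their module paths depend on the repository layout.


class TestTracking(unittest.TestCase):

    def setUp(self):
        # placeholder paths; point these at real data and checkpoints
        self.vot_dir = 'data/vot2017'
        self.otb_dir = 'data/OTB'
        self.stats_path = 'data/stats.json'
        self.net_path = 'data/goturn.pth'
        self.visualize = False

    def _rescale(self, array):
        # hypothetical helper: min-max normalize a response map to
        # [0, 255] uint8 so show_frame can display it
        array = array - array.min()
        if array.max() > 0:
            array = array / array.max()
        return (array * 255).astype(np.uint8)


if __name__ == '__main__':
    unittest.main()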