def draw_IMSAT_table(self, num_samples=20):
        # the loader must not shuffle, so predictions line up with images
        from .utils import Image_Pool
        from torchvision.utils import make_grid

        assert isinstance(self.val_loader.sampler,
                          torch.utils.data.SequentialSampler)

        with torch.no_grad():
            best_score, (target, soft_preds) = self._eval_loop(
                val_loader=self.val_loader,
                epoch=100000,
                mode=ModelMode.EVAL,
                return_soft_predict=True)
        images = []
        # make cifar/svhn images colorful: swap the Img2Tensor step (index 2
        # of the composed transform) to emit RGB instead of greyscale
        val_loader = dcp(self.val_loader)
        if val_loader.dataset_name in ("cifar", "svhn"):
            val_loader.dataset.datasets[0].datasets[0].transform.transforms[
                2] = pil_augment.Img2Tensor(include_rgb=True,
                                            include_grey=False)
            val_loader.dataset.datasets[0].datasets[1].transform.transforms[
                2] = pil_augment.Img2Tensor(include_rgb=True,
                                            include_grey=False)

        # walk the loader in order, keeping only the first view of each batch
        for image_gt in val_loader:
            img, gt, *_ = list(zip(*image_gt))
            img, gt = img[0], gt[0]
            images.append(img)

        images = torch.cat(images, 0)
        image_pool = Image_Pool(num_samples, 10)
        image_pool.add(images, soft_preds.argmax(dim=1).float())
        image_dict = image_pool.image_pool()
        # one grid row per cluster, num_samples images wide; the ten rows are
        # stacked vertically into a single table image
        first_image_size = make_grid(image_dict[0], nrow=num_samples).shape
        whole_image = torch.ones(first_image_size[0], first_image_size[1] * 10,
                                 first_image_size[2])
        for i in range(10):
            whole_image[:, first_image_size[1] * i:first_image_size[1] *
                        (i + 1), :] = make_grid(image_dict[i],
                                                nrow=num_samples)
        imsat_images = Image.fromarray(
            (whole_image.numpy().transpose(1, 2, 0) * 255.0).astype(np.uint8))
        imsat_images.save(f"{self.save_dir}/imsat_image.png")
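Image_Pool is imported from the project's utils and not shown in this listing. A minimal sketch of the interface the method relies on, assuming it just buckets up to num_samples images per predicted cluster (the project's own class may differ in detail):

import torch

class Image_Pool:
    # hypothetical reimplementation, inferred only from the call sites above
    def __init__(self, num_samples: int, num_classes: int):
        self.num_samples = num_samples
        self._pool = {c: [] for c in range(num_classes)}

    def add(self, images: torch.Tensor, labels: torch.Tensor):
        # keep at most num_samples images per predicted cluster
        for img, lab in zip(images, labels):
            bucket = self._pool[int(lab)]
            if len(bucket) < self.num_samples:
                bucket.append(img)

    def image_pool(self):
        # cluster index -> (n, C, H, W) tensor, ready for make_grid
        return {c: torch.stack(v) for c, v in self._pool.items()}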
Example #2
                split=split,
                transform=image_transform,
                target_transform=target_transform,
                download=True,
                **dataset_dict,
            )
            _datasets.append(dataset)
        serial_dataset = reduce(lambda x, y: x + y, _datasets)
        return serial_dataset
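The fragment above is the tail of a per-split dataset builder. The reduce(lambda x, y: x + y, _datasets) idiom works because torch.utils.data.Dataset defines __add__ to return a ConcatDataset; a self-contained illustration:

from functools import reduce

import torch
from torch.utils.data import TensorDataset

# three small datasets standing in for the per-split datasets built above
parts = [TensorDataset(torch.arange(i * 4, (i + 1) * 4)) for i in range(3)]
serial = reduce(lambda x, y: x + y, parts)  # Dataset.__add__ -> ConcatDataset
print(len(serial))  # 12
print(serial[5])    # (tensor(5),) -- indices run straight through the parts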


# ===================== public transform interface ===========================
svhn_naive_transform = {
    # output size 32*32
    "tf1": transforms.Compose([
        pil_augment.Img2Tensor(),
    ]),
    "tf2": transforms.Compose([
        pil_augment.RandomCrop(size=32, padding=2, ),
        pil_augment.Img2Tensor(),
    ]
    ),
    "tf3": transforms.Compose([
        pil_augment.Img2Tensor(),
    ]),
}
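A hedged sketch of how a tf1/tf2/tf3 dict like this is typically consumed: tf1 produces the clean view, tf2 the perturbed view, tf3 the evaluation view. pil_augment is project-specific, so torchvision's ToTensor stands in for Img2Tensor here to keep the example standalone:

import numpy as np
from PIL import Image
from torchvision import transforms

naive = {
    "tf1": transforms.Compose([transforms.ToTensor()]),
    "tf2": transforms.Compose([
        transforms.RandomCrop(32, padding=2),
        transforms.ToTensor(),
    ]),
    "tf3": transforms.Compose([transforms.ToTensor()]),
}
img = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
anchor, positive = naive["tf1"](img), naive["tf2"](img)  # paired views for clustering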
svhn_strong_transform = {
    # output size 32*32
    "tf1": transforms.Compose([
        pil_augment.CenterCrop(size=(28, 28)),
        pil_augment.Resize(size=32, interpolation=PIL.Image.BILINEAR),
        pil_augment.Img2Tensor(),
    ]),
Example #3
                target_transform=target_transform,
                download=True,
                **dataset_dict,
            )
            _datasets.append(dataset)
        serial_dataset = reduce(lambda x, y: x + y, _datasets)
        return serial_dataset


# ============================== public transform interface ================================
stl10_strong_transform = {
    "tf1":
    transforms.Compose([
        pil_augment.RandomCrop(size=(64, 64), padding=None),
        pil_augment.Resize(size=(64, 64), interpolation=0),  # 0 == PIL.Image.NEAREST
        pil_augment.Img2Tensor(include_grey=True, include_rgb=False),
    ]),
    "tf2":
    transforms.Compose([
        pil_augment.RandomCrop(size=(64, 64), padding=None),
        pil_augment.Resize(size=(64, 64), interpolation=0),  # 0 == PIL.Image.NEAREST
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(
            brightness=[0.6, 1.4],
            contrast=[0.6, 1.4],
            saturation=[0.6, 1.4],
            hue=[-0.125, 0.125],
        ),
        pil_augment.Img2Tensor(include_grey=True, include_rgb=False),
    ]),
    "tf3":
    def supervised_training(self, use_pretrain=True, lr=1e-3, data_aug=False):
        # load the best checkpoint
        self.load_checkpoint(
            torch.load(str(Path(self.checkpoint) / self.checkpoint_identifier),
                       map_location=torch.device("cpu")))
        self.model.to(self.device)

        from torchvision import transforms
        transform_train = transforms.Compose([
            pil_augment.CenterCrop(size=(20, 20)),
            pil_augment.Resize(size=(32, 32), interpolation=PIL.Image.NEAREST),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            pil_augment.Img2Tensor()
        ])
        transform_val = transforms.Compose([
            pil_augment.CenterCrop(size=(20, 20)),
            pil_augment.Resize(size=(32, 32), interpolation=PIL.Image.NEAREST),
            pil_augment.Img2Tensor()
        ])

        self.kl = KL_div(reduce=True)

        def _sup_train_loop(train_loader, epoch):
            self.model.train()
            train_loader_ = tqdm_(train_loader)
            for batch_num, image_gt in enumerate(train_loader_):
                image, gt = zip(*image_gt)
                image = image[0].to(self.device)
                gt = gt[0].to(self.device)

                if self.use_sobel:
                    image = self.sobel(image)

                pred = self.model.torchnet(image)[0]
                loss = self.kl(pred, class2one_hot(gt, 10).float())
                self.model.zero_grad()
                loss.backward()
                self.model.step()
                linear_meters["train_loss"].add(loss.item())
                linear_meters["train_acc"].add(pred.max(1)[1], gt)
                report_dict = {
                    "tra_acc": linear_meters["train_acc"].summary()["acc"],
                    "loss": linear_meters["train_loss"].summary()["mean"],
                }
                train_loader_.set_postfix(report_dict)

            print(f"  Training epoch {epoch}: {nice_dict(report_dict)} ")

        def _sup_eval_loop(val_loader, epoch) -> float:
            self.model.eval()
            val_loader_ = tqdm_(val_loader)
            for batch_num, image_gt in enumerate(val_loader_):
                image, gt = zip(*image_gt)
                image = image[0].to(self.device)
                gt = gt[0].to(self.device)

                if self.use_sobel:
                    image = self.sobel(image)

                pred = self.model.torchnet(image)[0]
                linear_meters["val_acc"].add(pred.max(1)[1], gt)
                report_dict = {
                    "val_acc": linear_meters["val_acc"].summary()["acc"]
                }
                val_loader_.set_postfix(report_dict)
            print(f"Validating epoch {epoch}: {nice_dict(report_dict)} ")
            return linear_meters["val_acc"].summary()["acc"]

        # build the training and validation loaders from the two sub-datasets
        # wrapped inside self.val_loader
        train_loader = dcp(self.val_loader)
        train_loader.dataset.datasets = (
            train_loader.dataset.datasets[0].datasets[0], )
        val_loader = dcp(self.val_loader)
        val_loader.dataset.datasets = (
            val_loader.dataset.datasets[0].datasets[1], )

        if data_aug:
            # swap in the supervised pipelines defined above
            train_loader.dataset.datasets[0].transform = transform_train
            val_loader.dataset.datasets[0].transform = transform_val

        # network and optimization
        if not use_pretrain:
            # train from scratch: re-initialize the whole network
            self.model.torchnet.apply(weights_init)
        else:
            # keep the pretrained trunk, wipe only the classification head
            self.model.torchnet.head_B.apply(weights_init)
        self.model.optimizer = torch.optim.Adam(
            self.model.torchnet.parameters(), lr=lr)
        self.model.scheduler = torch.optim.lr_scheduler.StepLR(
            self.model.optimizer, step_size=50, gamma=0.2)

        # meters
        meter_config = {
            "train_loss": AverageValueMeter(),
            "train_acc": ConfusionMatrix(self.model.arch_dict["output_k_B"]),
            "val_acc": ConfusionMatrix(self.model.arch_dict["output_k_B"])
        }
        linear_meters = MeterInterface(meter_config)
        drawer = DrawCSV2(
            save_dir=self.save_dir,
            save_name=f"supervised_from_checkpoint_{use_pretrain}"
                      f"_data_aug_{data_aug}.png",
            columns_to_draw=["train_loss_mean", "train_acc_acc", "val_acc_acc"])
        for epoch in range(self.max_epoch):
            _sup_train_loop(train_loader, epoch)
            with torch.no_grad():
                _ = _sup_eval_loop(val_loader, epoch)
            self.model.scheduler.step()  # advance the StepLR schedule once per epoch
            linear_meters.step()
            linear_meters.summary().to_csv(
                self.save_dir /
                f"supervised_from_checkpoint_{use_pretrain}_data_aug_{data_aug}.csv"
            )
            drawer.draw(linear_meters.summary())
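The loop above leans on two project helpers, class2one_hot and KL_div. A minimal sketch of the semantics assumed here, namely one-hot float targets and probability (softmax) outputs from the network head; the project's own versions may differ:

import torch
import torch.nn.functional as F

def class2one_hot(labels: torch.Tensor, num_classes: int) -> torch.Tensor:
    # (B,) integer labels -> (B, num_classes) one-hot matrix
    return F.one_hot(labels, num_classes)

class KL_div:
    # KL(target || pred); both arguments are rows on the probability simplex
    def __init__(self, reduce: bool = True):
        self.reduce = reduce

    def __call__(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        eps = 1e-8
        kl = (target * (target.clamp_min(eps).log()
                        - pred.clamp_min(eps).log())).sum(dim=1)
        return kl.mean() if self.reduce else kl

With a one-hot target the entropy term vanishes and the loss reduces to the cross-entropy -log pred[gt], which is why the loop can use it as a plain supervised objective.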