Example #1
 def logging_stat_cae(self, mode, cur_epoch, cur_fold, mean_loss,
                      num_folds):
     """ Report statistics and record them in tensorboard """
     logger_.info(
         f"[{cur_fold}/{num_folds}][{cur_epoch}/{self.args.num_epoch}] "
         f"{mode} loss: {np.round(mean_loss, 6)}")
     writer_.add_scalars(main_tag=f"{mode}/loss",
                         tag_scalar_dict={f"fold{cur_fold}": mean_loss},
                         global_step=cur_epoch)
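
The snippet relies on module-level logger_ and writer_ objects. A minimal sketch of how such objects are typically created, assuming Python's standard logging module and PyTorch's SummaryWriter (the logger name and log directory below are placeholders, not taken from this project):

import logging
from torch.utils.tensorboard import SummaryWriter

logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(levelname)s %(message)s")
logger_ = logging.getLogger("trainer")     # shared, module-level logger
writer_ = SummaryWriter(log_dir="./runs")  # TensorBoard event writer

# add_scalars groups several curves under one main tag, e.g. one curve per fold
writer_.add_scalars(main_tag="cv_train/loss",
                    tag_scalar_dict={"fold1": 0.42},
                    global_step=1)
writer_.close()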
Example #2
 def _show_data_composition(self):
     dataset_type = "TRAIN" if self.train else "TEST"
     logger_.info(f"*** {dataset_type} DATA COMPOSITION in Cross-Validation ***")
     for data_nm, idx_list in zip(["TRAIN", "VALID"], [self.train_idx_list, self.valid_idx_list]):
         for i in range(len(idx_list)):
             res = {}
             for cls_nm, cls_idx in self.class_to_idx.items():
                 cnt = np.count_nonzero(np.array(self.targets)[idx_list[i]] == cls_idx)
                 res[cls_nm] = cnt
             logger_.info(f"[{data_nm}][{i+1}] {res}")
Example #3
    def cross_validation(self):
        """ Performing training and validation. It can be used for cross-validation and testing """
        for i in range(self.num_folds):
            # setup for one pattern of the n-fold cross-validation
            logger_.info(
                f"** [{i + 1}/{self.num_folds}] {i + 1}-th CROSS-VALIDATION **"
            )
            logger_.info(
                f"** [{i + 1}/{self.num_folds}] SETUP DATASET and MODEL **")
            # get train dataset consisting of n-1 folds
            train = self.cv_dataset.get_train_dataset(i)
            # get valid dataset consisting of 1 fold
            valid = self.cv_dataset.get_valid_dataset(i)
            # construct model and optimizer
            model = self.__get_model().to(self.device)
            optimizer = model.get_optimizer()
            # define early stopping
            es = self._get_early_stopping()
            self.__logging_materials_one_time(i, {
                "MODEL": model,
                "OPTIMIZER": optimizer,
                "EARLY STOPPING": es
            })

            for j in range(self.args.num_epoch):
                # train
                self.cv_dataset.set_train_transform()
                self._train_epoch(cur_fold=i + 1,
                                  cur_epoch=j + 1,
                                  num_folds=self.num_folds,
                                  model=model,
                                  optimizer=optimizer,
                                  dataset=train,
                                  mode=TrainType.CV_TRAIN,
                                  es=es)
                # validation
                self.cv_dataset.set_valid_transform()
                with torch.no_grad():
                    self._train_epoch(cur_fold=i + 1,
                                      cur_epoch=j + 1,
                                      num_folds=self.num_folds,
                                      model=model,
                                      optimizer=optimizer,
                                      dataset=valid,
                                      mode=TrainType.CV_VALID,
                                      es=es)
                if es.is_stop:
                    logger_.info("FINISH TRAINING BY EARLY STOPPING")
                    logger_.info("EARLY STOP INFO")
                    logger_.info(es)
                    break
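
The loop above stops as soon as es.is_stop becomes true, and es is also passed to logger_.info, so the early-stopping object only needs a boolean flag and a readable string form. A minimal patience-based sketch, assuming _train_epoch feeds it the mean validation loss each epoch (the project's _get_early_stopping may return something different):

class EarlyStopping:
    def __init__(self, patience=10, delta=0.0):
        self.patience = patience   # epochs to wait after the last improvement
        self.delta = delta         # minimum decrease that counts as improvement
        self.best_loss = float("inf")
        self.counter = 0
        self.is_stop = False

    def step(self, valid_loss):
        """Update the stop flag from the latest validation loss."""
        if valid_loss < self.best_loss - self.delta:
            self.best_loss = valid_loss
            self.counter = 0
        else:
            self.counter += 1
            self.is_stop = self.counter >= self.patience

    def __str__(self):
        return (f"EarlyStopping(best_loss={self.best_loss:.6f}, "
                f"counter={self.counter}/{self.patience})")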
Example #4
    def __init__(self, num_classes=1000, aux_logits=False,
                 inception_blocks=None, init_weights=True, freeze_until=-1):
        super(Inception3, self).__init__()
        if inception_blocks is None:
            inception_blocks = [
                BasicConv2d, InceptionA, InceptionB, InceptionC,
                InceptionD, InceptionE, InceptionAux
            ]
        assert len(inception_blocks) == 7
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        inception_aux = inception_blocks[6]

        self.aux_logits = aux_logits
        self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
        self.Mixed_5b = inception_a(192, pool_features=32)
        self.Mixed_5c = inception_a(256, pool_features=64)
        self.Mixed_5d = inception_a(288, pool_features=64)
        self.Mixed_6a = inception_b(288)
        self.Mixed_6b = inception_c(768, channels_7x7=128)
        self.Mixed_6c = inception_c(768, channels_7x7=160)
        self.Mixed_6d = inception_c(768, channels_7x7=160)
        self.Mixed_6e = inception_c(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = inception_aux(768, num_classes)
        self.Mixed_7a = inception_d(768)
        self.Mixed_7b = inception_e(1280)
        self.Mixed_7c = inception_e(2048)
        self.fc = nn.Linear(2048, num_classes)
        if init_weights:
            logger_.info(f"Weight initialization...")
            for idx, m in enumerate(self.modules()):
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                    stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                    self.truncated_normal_(m.weight.data, 0.0, stddev)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

        self._freeze_layers(freeze_until=freeze_until)
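
self.truncated_normal_ is not shown in this excerpt. One plausible implementation, sketched here with torch.nn.init.trunc_normal_ and truncation at two standard deviations (shown as a plain function; in the class it would be a method or staticmethod, and the project's helper may instead use SciPy's truncnorm, as older torchvision code does):

import torch
from torch import nn

def truncated_normal_(tensor, mean=0.0, stddev=0.1):
    """Fill the tensor in place from a normal distribution truncated at 2 stddev."""
    nn.init.trunc_normal_(tensor, mean=mean, std=stddev,
                          a=mean - 2 * stddev, b=mean + 2 * stddev)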
Example #5
    def download(self, wnid, img_dir_name, limit=0, verbose=False):
        """
        指定したwnidに属する画像をimgフォルダに保存
        """
        if os.path.exists(f"./{self.img_dir}/{img_dir_name}"):
            logger_.info(
                f"Download no files. {self.img_dir}/{img_dir_name} already exists."
            )
        else:
            self._check_wnid(wnid)
            list_path = os.path.join(self.list_dir, wnid + '.txt')
            if not os.path.exists(list_path):
                self._download_imglist(self.list_dir, wnid)

            imginfo = self._make_imginfo(list_path)

            img_dir = os.path.join(self.img_dir, img_dir_name)
            os.makedirs(img_dir, exist_ok=True)

            self._download_imgs(img_dir, imginfo, limit, verbose)
Example #6
    def logging_stat_cnn(self, mode: str, cur_epoch: int, cur_fold: int, stats,
                         mean_loss, num_folds):
        """ Report statistics and record them in tensorboard """
        # logging overall loss and acc
        for stat_nm, stat in zip(["loss", "acc"],
                                 [mean_loss, stats["accuracy"]]):
            writer_.add_scalars(main_tag=f"{mode}/{stat_nm}",
                                tag_scalar_dict={f"fold{cur_fold}": stat},
                                global_step=cur_epoch)
        logger_.info(
            f"[{cur_fold}/{num_folds}][{cur_epoch}/{self.args.num_epoch}] "
            f"{mode} loss: {np.round(mean_loss, 4)}, {mode} acc: {np.round(stats['accuracy'], 4)}"
        )

        # logging precision, recall and f1-score per class
        for cls_nm, stat in stats.items():
            if cls_nm in self.class_names:
                for stat_type in ["precision", "recall", "f1-score"]:
                    writer_.add_scalars(
                        main_tag=f"{mode}_fold{cur_fold}/{stat_type}",
                        tag_scalar_dict={f"{cls_nm}": stat[stat_type]},
                        global_step=cur_epoch)
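
The stats argument is indexed by "accuracy" as well as by class names holding "precision", "recall" and "f1-score", which matches the dictionary returned by scikit-learn's classification_report with output_dict=True. A sketch of building such a dict (whether this project actually uses scikit-learn is an assumption; the labels below are illustrative):

from sklearn.metrics import classification_report

y_true = [0, 0, 1, 1, 2]
y_pred = [0, 1, 1, 1, 2]
class_names = ["airplane", "automobile", "bird"]

stats = classification_report(y_true, y_pred,
                              target_names=class_names,
                              output_dict=True,
                              zero_division=0)
# stats["accuracy"]  -> overall accuracy (float)
# stats["airplane"]  -> {"precision": ..., "recall": ..., "f1-score": ..., "support": ...}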
Example #7
 def __logging_materials_one_time(self, fold_seq, target_map):
     if fold_seq == 0:
         for name, material in target_map.items():
             logger_.info(f"*** {name} ***")
             logger_.info(material)
Example #8
def main():
    args, config = initialization()
    from utils.logger import logger_
    from dataset.sub_cifar10.cifar10_cv import CIFAR10CV
    from dataset.sub_cifar10.cifar10_test import CIFAR10Test
    from trainer.sub_trainer.train_cae_cnn import TrainCAECNN
    from trainer.sub_trainer.train_only_cnn import TrainOnlyCNN
    from trainer.sub_trainer.train_only_cae import TrainOnlyCAE

    logger_.info("*** SET DEVICE ***")
    device = "cpu" if args.use_gpu == 0 else "cuda"
    logger_.info(f"Device is {device}")

    logger_.info("*** CREATE DATASET ***")
    trainset = CIFAR10CV(root='./files/input/dataset', train=True, download=True, args=args,
                         reg_map=config["train_data_regulation"],
                         expand_map=config.get("train_data_expansion"))
    testset = CIFAR10Test(root='./files/input/dataset', train=False, download=True, args=args, cifar10_cv=trainset)

    logger_.info("*** DEFINE TRAINER ***")
    # train both CAE and CNN
    if config["use_cae"] and config["use_cnn"]:
        trainer_cv = TrainCAECNN(trainset, args, config, device)
        trainer_test = TrainCAECNN(testset, args, config, device)
    # only train CNN
    elif config["use_cnn"]:
        trainer_cv = TrainOnlyCNN(trainset, args, config, device)
        trainer_test = TrainOnlyCNN(testset, args, config, device)
    # only train CAE
    elif config["use_cae"]:
        trainer_cv = TrainOnlyCAE(trainset, args, config, device)
        trainer_test = TrainOnlyCAE(testset, args, config, device)
    else:
        assert False, "At least one model should be specified."

    # training and validation
    if args.do_cv:
        logger_.info("*** CROSS-VALIDATION ***")
        trainer_cv.cross_validation()
    if args.do_test:
        logger_.info("*** TEST ***")
        trainer_test.cross_validation()

    exit(0)
Example #9
def main():
    from config.config_grad_cam import ConfigGradCAM
    args: ConfigGradCAM = initialization()

    import copy
    import torch
    from torchvision.datasets import ImageFolder
    from torch.utils.data.dataloader import DataLoader
    from utils.logger import logger_
    from dataset.img_transform import ImgTransform
    from utils.downloader import ImageNet
    from models.cam.grad_cam import GradCAM
    from models.cam.guided_backprop import GuidedBackprop
    from models.cam.guided_grad_cam import GuidedGradCAM
    from models.cnn.inception import Inception3

    logger_.info("*** SET DEVICE ***")
    device = "cuda" if args.use_gpu else "cpu"
    logger_.info(f"Device is {device}")

    logger_.info("*** DOWNLOAD DATASET ***")
    # download images of standard poodles
    api = ImageNet(root="./files/input/dataset")
    api.download(str(args.wnid),
                 str(args.target_cls_idx),
                 verbose=True,
                 limit=32)

    logger_.info("*** CREATE DATASET ***")
    trans = ImgTransform(args, is_train=False)
    dataset = ImageFolder("./files/input/dataset/img", transform=trans)

    logger_.info("*** CREATE DATA LOADER ***")
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    assert loader.batch_size == 1, f"batch size should be 1 but got {loader.batch_size}"

    logger_.info("*** LOAD CNN MODEL ***")
    model_gc = Inception3(num_classes=1000, aux_logits=True).to(device)
    model_gc.load_pre_train_weights(progress=True)
    model_gp = copy.deepcopy(model_gc).to(device)

    logger_.info("*** Prepare GradCAM and Guided Backprop procedures ***")
    grad_cam = GradCAM(model=model_gc,
                       f_get_last_module=lambda model_: model_.Mixed_7c,
                       device=device)
    guided_backprop = GuidedBackprop(model=model_gp)

    logger_.info("*** VISUALIZATION ***")
    for id, batch in enumerate(loader):
        logger_.info(f"[{id+1}/{len(loader)}] processing...")
        image, _ = batch
        # calc and visualize heatmap of Grad-CAM
        heatmap, probs = grad_cam(image, cls_idx=args.target_cls_idx)
        # calc and visualize guided backprop
        gb = guided_backprop(image, cls_idx=args.target_cls_idx)
        # calc and visualize Guided Grad-CAM
        ggc = GuidedGradCAM.calc_guided_grad_cam(heatmap, gb)
        GuidedGradCAM.save_all_output(image, heatmap, ggc, probs,
                                      args.target_cls_idx, id)

    exit(0)
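
For reference, the grad_cam(image, cls_idx=...) call above boils down to two hooks on the chosen layer (Mixed_7c here): one stores the layer's activations during the forward pass, the other stores their gradients during the backward pass of the target class score; the activations are then weighted by the channel-wise mean gradient and passed through a ReLU. A minimal hook-based sketch, not this project's GradCAM class:

import torch
import torch.nn.functional as F

class MinimalGradCAM:
    def __init__(self, model, target_layer, device="cpu"):
        self.model = model.to(device).eval()
        self.device = device
        self.activations = None
        self.gradients = None
        # capture activations and their gradients at the target layer
        target_layer.register_forward_hook(self._save_activation)
        target_layer.register_full_backward_hook(self._save_gradient)

    def _save_activation(self, module, inputs, output):
        self.activations = output.detach()

    def _save_gradient(self, module, grad_input, grad_output):
        self.gradients = grad_output[0].detach()

    def __call__(self, image, cls_idx):
        image = image.to(self.device)
        logits = self.model(image)      # forward pass fills self.activations
        self.model.zero_grad()
        logits[0, cls_idx].backward()   # backward pass fills self.gradients
        # weight each channel by its spatially averaged gradient, then ReLU
        weights = self.gradients.mean(dim=(2, 3), keepdim=True)
        cam = F.relu((weights * self.activations).sum(dim=1, keepdim=True))
        cam = F.interpolate(cam, size=image.shape[-2:],
                            mode="bilinear", align_corners=False)
        cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)
        probs = torch.softmax(logits, dim=1)
        return cam, probs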