Example #1
    def get_dataloader(self, batch_size, n_worker, train_list, test_list):
        # Loader
        train_loader = None
        test_loader = None
        if train_list is not None and len(train_list) > 0:
            train_dataset = ImageDataset(
                pd.DataFrame(train_list),
                dataset=self.dataset,
                transform=self.train_transform,
            )
            # drop_last because of BatchNorm1d in IcarlNet
            train_loader = DataLoader(
                train_dataset,
                shuffle=True,
                batch_size=batch_size,
                num_workers=n_worker,
                drop_last=True,
            )

        if test_list is not None:
            test_dataset = ImageDataset(
                pd.DataFrame(test_list),
                dataset=self.dataset,
                transform=self.test_transform,
            )
            test_loader = DataLoader(test_dataset,
                                     shuffle=False,
                                     batch_size=batch_size,
                                     num_workers=n_worker)

        return train_loader, test_loader
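A minimal usage sketch for the method above (the caller object, the sample lists, and the batch size are illustrative assumptions, not part of the example):

    # `method` stands for an instance of the class that defines get_dataloader;
    # train_samples / test_samples are lists of dict-like records that
    # pd.DataFrame can consume, as in the example above.
    train_loader, test_loader = method.get_dataloader(
        batch_size=32, n_worker=2, train_list=train_samples, test_list=test_samples
    )
    if train_loader is not None:
        batch = next(iter(train_loader))
        images = batch["image"]  # ImageDataset batches are dicts with an "image" tensor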
Example #2
def main(parser):
    G1 = Generator(input_channels=3, output_channels=1)
    G2 = Generator(input_channels=4, output_channels=3)
    D1 = Discriminator(input_channels=4)
    D2 = Discriminator(input_channels=7)
    '''load'''
    #G1_weights = torch.load('./checkpoints/ST-CGAN_G1_xxxx.pth')
    #G1.load_state_dict(fix_model_state_dict(G1_weights))

    #G2_weights = torch.load('./checkpoints/ST-CGAN_G2_xxxx.pth')
    #G2.load_state_dict(fix_model_state_dict(G2_weights))

    #D1_weights = torch.load('./checkpoints/ST-CGAN_D1_xxxx.pth')
    #D1.load_state_dict(fix_model_state_dict(D1_weights))

    #D2_weights = torch.load('./checkpoints/ST-CGAN_D2_xxxx.pth')
    #D2.load_state_dict(fix_model_state_dict(D2_weights))

    train_img_list, val_img_list = make_datapath_list(phase='train', rate=0.8)

    mean = (0.5, )
    std = (0.5, )
    size = parser.image_size
    batch_size = parser.batch_size
    num_epochs = parser.epoch

    train_dataset = ImageDataset(img_list=train_img_list,
                                 img_transform=ImageTransform(size=size,
                                                              crop_size=256,
                                                              mean=mean,
                                                              std=std))
    val_dataset = ImageDataset(img_list=val_img_list,
                               img_transform=ImageTransform(size=size,
                                                            crop_size=256,
                                                            mean=mean,
                                                            std=std))

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True)  #num_workers=4

    G1_update, G2_update, D1_update, D2_update = train_model(
        G1,
        G2,
        D1,
        D2,
        dataloader=train_dataloader,
        val_dataset=val_dataset,
        num_epochs=num_epochs,
        parser=parser,
        save_model_name='ST-CGAN')
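The `parser` passed to main above is just a parsed-arguments object; a minimal sketch of how it might be built (only the attributes read above are defined, and the defaults are assumptions):

    import argparse

    def get_parser():
        # Hypothetical argument definitions matching the attributes main() reads.
        parser = argparse.ArgumentParser(description='ST-CGAN training')
        parser.add_argument('--image_size', type=int, default=286)  # resize size (assumed default)
        parser.add_argument('--batch_size', type=int, default=4)    # mini-batch size (assumed default)
        parser.add_argument('--epoch', type=int, default=100)       # training epochs (assumed default)
        return parser.parse_args()

    if __name__ == '__main__':
        main(get_parser())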
Example #3
def main(parser):
    pconv_unet = PConvUNet()
    '''load'''
    #pconv_weights = torch.load('./checkpoints/PConvUNet_PConvUNet_1000.pth')
    #pconv_unet.load_state_dict(fix_model_state_dict(pconv_weights))

    train_img_list, val_img_list = make_datapath_list(iorm='img',
                                                      path='img_align_celeba',
                                                      phase='train')
    mask_list = make_datapath_list(iorm='mask', path='mask_rectangle')

    mean = (0.5, )
    std = (0.5, )
    size = (parser.image_size, parser.image_size)
    batch_size = parser.batch_size
    num_epochs = parser.epoch

    train_dataset = ImageDataset(img_list=train_img_list,
                                 mask_list=mask_list,
                                 img_transform=ImageTransform(size=size,
                                                              mean=mean,
                                                              std=std),
                                 mask_transform=MaskTransform(size=size))
    val_dataset = ImageDataset(img_list=val_img_list,
                               mask_list=mask_list,
                               img_transform=ImageTransform(size=size,
                                                            mean=mean,
                                                            std=std),
                               mask_transform=MaskTransform(size=size))

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True)  #num_workers=4

    pconv_unet_update = train_model(pconv_unet,
                                    dataloader=train_dataloader,
                                    val_dataset=val_dataset,
                                    num_epochs=num_epochs,
                                    parser=parser,
                                    save_model_name='PConvUNet_Rectangle')
Example #4
def main(parser):
    G1 = Generator(input_channels=3, output_channels=1)
    G2 = Generator(input_channels=4, output_channels=3)
    '''load'''
    if parser.load is not None:
        print('load checkpoint ' + parser.load)

        G1_weights = torch.load('./checkpoints/ST-CGAN_G1_' + parser.load +
                                '.pth',
                                map_location=torch.device('cpu'))
        G1.load_state_dict(fix_model_state_dict(G1_weights))

        G2_weights = torch.load('./checkpoints/ST-CGAN_G2_' + parser.load +
                                '.pth',
                                map_location=torch.device('cpu'))
        G2.load_state_dict(fix_model_state_dict(G2_weights))

    mean = (0.5, )
    std = (0.5, )

    size = parser.image_size
    crop_size = parser.crop_size
    resized_size = parser.resized_size

    sri = []

    # test own image
    if parser.image_path is not None:
        print('test ' + parser.image_path)
        sri = test_own_image(G1,
                             G2,
                             parser.image_path,
                             parser.out_path,
                             resized_size,
                             img_transform=ImageTransformOwn(size=size,
                                                             mean=mean,
                                                             std=std))

    # test images from the ISTD dataset
    else:
        print('test ISTD dataset')
        test_img_list = make_datapath_list(phase='test')
        test_dataset = ImageDataset(img_list=test_img_list,
                                    img_transform=ImageTransform(
                                        size=size,
                                        crop_size=crop_size,
                                        mean=mean,
                                        std=std),
                                    phase='test')
        test(G1, G2, test_dataset)
    return sri
Example #5
    def evaluation_ext(self, test_list):
        # evaluation on out-of-class (external) data
        test_dataset = ImageDataset(
            pd.DataFrame(test_list),
            dataset=self.dataset,
            transform=self.test_transform,
        )
        test_loader = DataLoader(test_dataset,
                                 shuffle=False,
                                 batch_size=32,
                                 num_workers=2)
        eval_dict = self.evaluation(test_loader, self.criterion)

        return eval_dict
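A minimal usage sketch (the caller name and the external sample list are assumptions; the result keys follow the evaluation dictionaries used in the other examples):

    ext_eval = method.evaluation_ext(external_test_list)
    print(ext_eval["avg_acc"], ext_eval["avg_loss"])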
Example #6
    def _compute_uncert(self, infer_list, infer_transform, uncert_name):
        batch_size = 32
        infer_df = pd.DataFrame(infer_list)
        infer_dataset = ImageDataset(infer_df,
                                     dataset=self.dataset,
                                     transform=infer_transform)
        infer_loader = DataLoader(infer_dataset,
                                  shuffle=False,
                                  batch_size=batch_size,
                                  num_workers=2)

        self.model.eval()
        with torch.no_grad():
            for n_batch, data in enumerate(infer_loader):
                x = data["image"]
                x = x.to(self.device)
                logit = self.model(x)
                logit = logit.detach().cpu()

                for i, cert_value in enumerate(logit):
                    sample = infer_list[batch_size * n_batch + i]
                    sample[uncert_name] = 1 - cert_value
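After this pass each record in infer_list carries an uncertainty tensor under uncert_name; a hedged sketch of ranking samples by that score (the literal key "uncertainty" and the cutoff k are assumptions):

    # Keep the k samples the model is least certain about, e.g. as candidates for
    # the rehearsal memory; each stored value is a tensor, so reduce it to a float.
    k = 100
    ranked = sorted(infer_list,
                    key=lambda s: float(s["uncertainty"].mean()),
                    reverse=True)
    most_uncertain = ranked[:k]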
Example #7
    def train(self, cur_iter, n_epoch, batch_size, n_worker, n_passes=0):
        if len(self.memory_list) > 0:
            mem_dataset = ImageDataset(
                pd.DataFrame(self.memory_list),
                dataset=self.dataset,
                transform=self.train_transform,
            )
            memory_loader = DataLoader(
                mem_dataset,
                shuffle=True,
                batch_size=(batch_size // 2),
                num_workers=n_worker,
            )
            stream_batch_size = batch_size - batch_size // 2
        else:
            memory_loader = None
            stream_batch_size = batch_size

        # train_list == streamed_list in RM
        train_list = self.streamed_list
        test_list = self.test_list
        random.shuffle(train_list)
        # Configure each batch with equal shares of streamed and memory data.
        train_loader, test_loader = self.get_dataloader(
            stream_batch_size, n_worker, train_list, test_list
        )

        logger.info(f"Streamed samples: {len(self.streamed_list)}")
        logger.info(f"In-memory samples: {len(self.memory_list)}")
        logger.info(f"Train samples: {len(train_list)+len(self.memory_list)}")
        logger.info(f"Test samples: {len(test_list)}")

        # TRAIN
        best_acc = 0.0
        eval_dict = dict()
        self.model = self.model.to(self.device)
        for epoch in range(n_epoch):
            # initialize for each task
            if epoch <= 0:  # Warm start of 1 epoch
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] = self.lr * 0.1
            elif epoch == 1:  # Then set to maxlr
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] = self.lr
            else:  # And go!
                self.scheduler.step()

            train_loss, train_acc = self._train(train_loader=train_loader, memory_loader=memory_loader,
                                                optimizer=self.optimizer, criterion=self.criterion)
            eval_dict = self.evaluation(
                test_loader=test_loader, criterion=self.criterion
            )
            writer.add_scalar(f"task{cur_iter}/train/loss", train_loss, epoch)
            writer.add_scalar(f"task{cur_iter}/train/acc", train_acc, epoch)
            writer.add_scalar(f"task{cur_iter}/test/loss", eval_dict["avg_loss"], epoch)
            writer.add_scalar(f"task{cur_iter}/test/acc", eval_dict["avg_acc"], epoch)
            writer.add_scalar(
                f"task{cur_iter}/train/lr", self.optimizer.param_groups[0]["lr"], epoch
            )

            logger.info(
                f"Task {cur_iter} | Epoch {epoch+1}/{n_epoch} | train_loss {train_loss:.4f} | train_acc {train_acc:.4f} | "
                f"test_loss {eval_dict['avg_loss']:.4f} | test_acc {eval_dict['avg_acc']:.4f} | "
                f"lr {self.optimizer.param_groups[0]['lr']:.4f}"
            )

            best_acc = max(best_acc, eval_dict["avg_acc"])

        return best_acc, eval_dict
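The first two epochs above implement a one-epoch warm start before the scheduler takes over; a compact sketch of the same logic factored into a helper (assuming self.lr, self.optimizer, and self.scheduler are set up elsewhere in the class):

    def _set_warmup_lr(self, epoch):
        # epoch 0: warm start at 10% of the base LR; epoch 1: full LR;
        # from epoch 2 onward the scheduler owns the learning rate.
        if epoch == 0:
            lr = self.lr * 0.1
        elif epoch == 1:
            lr = self.lr
        else:
            self.scheduler.step()
            return
        for param_group in self.optimizer.param_groups:
            param_group["lr"] = lr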
Example #8
        check_point = torch.load(opt.model_path.format(opt.last_epoch))
        model.load_state_dict(check_point['state_dict'])
        optimizer.load_state_dict(check_point['optimizer'])
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[opt.total_epoch],
            gamma=0.1,
            last_epoch=opt.last_epoch)
    else:
        # learning-rate update schedule
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[opt.total_epoch], gamma=0.1)

    # load the training samples
    train_set = ImageDataset(file_name=opt.train_path,
                             length=5000,
                             class_num=opt.class_num)
    # DataLoader wraps the dataset into batch-sized tensors (batch size, shuffling,
    # etc.) for the training loop below
    lmdb_train = DataLoader(
        train_set,  # data source
        batch_size=opt.train_batch_size,  # mini-batch size
        shuffle=True,  # shuffle samples every epoch
        num_workers=0)  # single-process data loading

    # test samples
    test_set = ImageDataset(file_name=opt.test_path,
                            length=1000,
                            class_num=opt.class_num)
    lmdb_test = DataLoader(test_set,
                           batch_size=opt.test_batch_size,
                           shuffle=False,
                           num_workers=0)  # closing argument assumed, mirroring the train loader
Example #9
    def train(self, cur_iter, n_epoch, batch_size, n_worker, n_passes=1):

        logger.info(f"Streamed samples: {len(self.streamed_list)}")
        logger.info(f"In-memory samples: {len(self.memory_list)}")
        logger.info(f"Test samples: {len(self.test_list)}")

        random.shuffle(self.streamed_list)

        test_list = self.test_list
        train_list = self.streamed_list + self.memory_list
        logger.info("[Task {}] self.training_list length: {}".format(
            cur_iter, len(train_list)))

        train_loader, test_loader = self.get_dataloader(
            batch_size, n_worker, train_list, test_list)

        # TRAIN
        best_acc = 0.0
        self.model = self.model.to(self.device)
        for epoch in range(n_epoch):
            # https://github.com/drimpossible/GDumb/blob/master/src/main.py
            # initialize for each task
            if epoch <= 0:  # Warm start of 1 epoch
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] = self.lr * 0.1
            elif epoch == 1:  # Then set to maxlr
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] = self.lr
            else:  # And go!
                self.scheduler.step()

            train_loss = self._train(
                train_loader=train_loader,
                optimizer=self.optimizer,
                criterion=self.criterion,
                epoch=epoch,
                total_epochs=n_epoch,
                cur_iter=cur_iter,
                n_passes=n_passes,
            )
            eval_dict = self.evaluation(test_loader=test_loader,
                                        criterion=self.criterion)

            writer.add_scalar(f"task{cur_iter}/train/loss", train_loss, epoch)
            writer.add_scalar(f"task{cur_iter}/train/lr",
                              self.optimizer.param_groups[0]["lr"], epoch)
            writer.add_scalar(f"task{cur_iter}/test/acc", eval_dict["avg_acc"],
                              epoch)

            logger.info(
                f"Task {cur_iter} | Epoch {epoch+1}/{n_epoch} | train_loss {train_loss:.4f} | "
                f"test_acc {eval_dict['avg_acc']:.4f} | train_lr {self.optimizer.param_groups[0]['lr']:.4f}"
            )

        if cur_iter == 0:
            n_sample = self.valid_size // len(self.exposed_classes)
            logger.info(
                f"[task {cur_iter}] num samples for bias correction : {n_sample}"
            )
            self.construct_correction_examplers(num_sample=n_sample)

        else:
            n_sample = self.valid_size // len(self.learned_classes)
            logger.info(
                f"[task {cur_iter}] num samples for bias correction : {n_sample}"
            )
            self.construct_correction_examplers(num_sample=n_sample)

            correction_df = pd.DataFrame(self.valid_list)
            correction_dataset = ImageDataset(correction_df,
                                              dataset=self.dataset,
                                              transform=self.train_transform)
            correction_loader = DataLoader(correction_dataset,
                                           shuffle=True,
                                           batch_size=100,
                                           num_workers=2)
            self.bias_correction(
                bias_loader=correction_loader,
                test_loader=test_loader,
                criterion=self.criterion,
                n_epoch=n_epoch,
            )

        eval_dict = self.evaluation(test_loader=test_loader,
                                    criterion=self.criterion)
        if best_acc < eval_dict["avg_acc"]:
            best_acc = eval_dict["avg_acc"]
            self.prev_model = deepcopy(self.model)
        return best_acc, eval_dict
Example #10
        return self.loss_layer([input, label])


if __name__ == "__main__":

    model = ResnetEncoderDecoder()


    optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0001)
    checkpoint_path = "./checkpoints/train"
    ckpt = tf.train.Checkpoint(model=model,
                               optimizer=optimizer,
                               step=tf.train.get_or_create_global_step())
    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)

    train_set = ImageDataset(data_path=opt.train_path, char_path="utils/char.txt",
                             batch_size=128, training=True).data_generation()
    test_set = ImageDataset(data_path=opt.test_path, char_path="utils/char.txt",
                            batch_size=128, training=False).data_generation()

    start_epoch = 0
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        start_epoch = int(ckpt_manager.latest_checkpoint.split('-')[-1])
        print(f'Latest checkpoint restored!!\n\tModel path is {ckpt_manager.latest_checkpoint}')

    epochs = 100000
    for epoch in range(start_epoch, epochs):
        loss_history = []
        for step, (inputs, labels) in enumerate(train_set):
            # print (inputs)
            with tf.GradientTape() as tape:
                loss = model(inputs["images"], training=True)
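            # The snippet ends here; a hedged continuation (not from the original code)
            # would apply the usual GradientTape update after the with-block, roughly:
            #     gradients = tape.gradient(loss, model.trainable_variables)
            #     optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            #     loss_history.append(loss.numpy())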