Code example #1
File: model_api.py  Project: billbai0102/SGMT
class Model:
    def __init__(self, train_dl, val_dl):
        self.device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.train_dl = train_dl
        self.val_dl = val_dl

        self.loss = Loss()
        self.net = UNet(1).to(self.device)
        self.net.apply(Model._init_weights)
        self.criterion = self.loss.BCEDiceLoss
        self.optim = None
        self.scheduler = None

        self._init_optim(LR, BETAS)

        self.cycles = 0
        self.hist = {'train': [], 'val': [], 'loss': []}

        utils.create_dir('./pt')
        utils.log_data_to_txt('train_log', f'\nUsing device {self.device}')

    def _init_optim(self, lr, betas):
        # Optimize only the parameters that still require gradients.
        self.optim = optim.Adam(utils.filter_gradients(self.net),
                                lr=lr,
                                betas=betas)

        # Decay the learning rate by 25% every 100 scheduler steps.
        self.scheduler = optim.lr_scheduler.StepLR(self.optim,
                                                   step_size=100,
                                                   gamma=.75)

    def _save_models(self):
        utils.save_state_dict(self.net, 'model', './pt')
        utils.save_state_dict(self.optim, 'optim', './pt')
        utils.save_state_dict(self.scheduler, 'scheduler', './pt')

    def train(self, epochs):
        for epoch in range(epochs):
            self.net.train()
            for idx, data in enumerate(self.train_dl):
                batch_time = time.time()

                # Track the total number of optimizer steps across epochs.
                self.cycles += 1
                print(self.cycles)

                image = data['MRI'].to(self.device)
                target = data['Mask'].to(self.device)

                output = self.net(image)

                # Threshold the network output at 0.5 to get a binary mask
                # for the training F1 metric.
                output_rounded = np.copy(output.data.cpu().numpy())
                output_rounded[np.nonzero(output_rounded < 0.5)] = 0.
                output_rounded[np.nonzero(output_rounded >= 0.5)] = 1.
                train_f1 = self.loss.F1_metric(output_rounded,
                                               target.data.cpu().numpy())

                loss = self.criterion(output, target)

                self.hist['train'].append(train_f1)
                self.hist['loss'].append(loss.item())

                self.optim.zero_grad()
                loss.backward()
                self.optim.step()
                self.scheduler.step()

                if self.cycles % 100 == 0:
                    self._save_models()
                    val_f1 = self.evaluate()
                    utils.log_data_to_txt(
                        'train_log',
                        f'\nEpoch: {epoch}/{epochs} - Batch: {idx * BATCH_SIZE}/{len(self.train_dl.dataset)}'
                        f'\nLoss: {loss.mean().item():.4f}'
                        f'\nTrain F1: {train_f1:.4f} - Val F1: {val_f1}'
                        f'\nTime taken: {time.time() - batch_time:.4f}s')

    def evaluate(self):
        # Run validation in eval mode, then restore train mode below.
        self.net.eval()
        f1_total = 0

        with torch.no_grad():
            for idx, data in enumerate(self.val_dl):
                image, target = data['MRI'], data['Mask']

                image = image.to(self.device)
                target = target.to(self.device)

                outputs = self.net(image)

                # Threshold predictions at 0.3 to get a binary mask.
                out_thresh = np.copy(outputs.data.cpu().numpy())
                out_thresh[np.nonzero(out_thresh < .3)] = 0.0
                out_thresh[np.nonzero(out_thresh >= .3)] = 1.0

                f1_total += self.loss.F1_metric(out_thresh,
                                                target.data.cpu().numpy())

        self.net.train()
        # Average the F1 metric over the number of validation batches.
        return f1_total / len(self.val_dl)

    @classmethod
    def _init_weights(cls, layer: nn.Module):
        # DCGAN-style initialization, applied via net.apply():
        # N(0, 0.02) for conv weights, N(1, 0.02) for batch-norm scales.
        name = layer.__class__.__name__
        if name.find('Conv') != -1 and name.find('2d') != -1:
            nn.init.normal_(layer.weight.data, .0, 2e-2)
        if name.find('BatchNorm') != -1:
            nn.init.normal_(layer.weight.data, 1.0, 2e-2)
            nn.init.constant_(layer.bias.data, .0)
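
The class above leans on a small utils module that is not part of this excerpt. Below is a minimal sketch of what those helpers might look like, assuming only the names and call signatures used in model_api.py; the actual implementations in billbai0102/SGMT may differ.

import os
import torch

def create_dir(path):
    # Create the checkpoint directory if it does not already exist (assumption).
    os.makedirs(path, exist_ok=True)

def log_data_to_txt(name, text):
    # Append a chunk of text to '<name>.txt' (assumed log format).
    with open(f'{name}.txt', 'a') as f:
        f.write(text)

def filter_gradients(net):
    # Yield only the parameters that should be updated by the optimizer.
    return (p for p in net.parameters() if p.requires_grad)

def save_state_dict(obj, name, directory):
    # Persist the state_dict of a module, optimizer, or scheduler.
    torch.save(obj.state_dict(), os.path.join(directory, f'{name}.pt'))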
Code example #2
File: pix2pix.py  Project: blacknwhite5/GANs-pytorch
def main():
    # Networks
    G = UNet().to(device)
    D = Discriminator().to(device)

    # Initialize network weights
    G.apply(weight_init)
    D.apply(weight_init)

    # Load a pretrained model if requested
    if args.reuse:
        assert os.path.isfile(args.save_path), '[!]Pretrained model not found'
        checkpoint = torch.load(args.save_path)
        G.load_state_dict(checkpoint['G'])
        D.load_state_dict(checkpoint['D'])
        print('[*]Pretrained model loaded')

    # optimizer
    G_optim = optim.Adam(G.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    D_optim = optim.Adam(D.parameters(), lr=args.lr, betas=(args.b1, args.b2))

    for epoch in range(args.num_epoch):
        for i, imgs in enumerate(dataloader['train']):
            A = imgs['A'].to(device)
            B = imgs['B'].to(device)

            # # # # #
            # Discriminator
            # # # # #
            G.eval()
            D.train()

            fake = G(B)
            # Detach the generator output so the discriminator update
            # does not backpropagate into G.
            D_fake = D(fake.detach(), B)
            D_real = D(A, B)

            # original GAN loss for D: -E[log D(real)] - E[log(1 - D(fake))]
            loss_D = -((D_real.log() + (1 - D_fake).log()).mean())

            #            # LSGAN loss D
            #            loss_D = ((D_real - 1)**2).mean() + (D_fake**2).mean()

            D_optim.zero_grad()
            loss_D.backward()
            D_optim.step()

            # # # # #
            # Generator
            # # # # #
            G.train()
            D.eval()

            fake = G(B)
            D_fake = D(fake, B)

            # original (non-saturating) GAN loss for G plus L1 reconstruction
            loss_G = -(D_fake.log().mean()
                       ) + args.lambda_recon * torch.abs(A - fake).mean()

            #            # LSGAN loss G
            #            loss_G = ((D_fake-1)**2).mean() + args.lambda_recon * torch.abs(A - fake).mean()

            G_optim.zero_grad()
            loss_G.backward()
            G_optim.step()

            # Print training progress
            print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
                  (epoch, args.num_epoch, i * args.batch_size,
                   len(datasets['train']), loss_D.item(), loss_G.item()))

        # Save sample images (once per epoch)
        val = next(iter(dataloader['test']))
        real_A = val['A'].to(device)
        real_B = val['B'].to(device)

        with torch.no_grad():
            fake_A = G(real_B)
        save_image(torch.cat([real_A, real_B, fake_A], dim=3),
                   'images/{0:03d}.png'.format(epoch + 1),
                   nrow=2,
                   normalize=True)

        # Save model checkpoint
        torch.save({
            'G': G.state_dict(),
            'D': D.state_dict(),
        }, args.save_path)
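
The weight_init function applied to G and D is not included in this excerpt. Below is a minimal sketch of a typical DCGAN-style initializer that matches how it is used here; the actual function in blacknwhite5/GANs-pytorch may differ.

import torch.nn as nn

def weight_init(layer):
    # Draw conv weights from N(0, 0.02) and batch-norm scales from N(1, 0.02),
    # a common initialization for GAN generators and discriminators (assumption).
    name = layer.__class__.__name__
    if 'Conv' in name:
        nn.init.normal_(layer.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in name:
        nn.init.normal_(layer.weight.data, 1.0, 0.02)
        nn.init.constant_(layer.bias.data, 0.0)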