Example #1
import numpy as np
import matplotlib.pyplot as plt
import torch
from model import define_G

## load sample (the .npy file stores a dict, so allow_pickle is required on NumPy >= 1.16.3)
a = np.load('./brats18_dataset/sample.npy', allow_pickle=True).item()
## add batch/channel dims and rescale t2 from [0, 1] to [-1, 1]
t2 = ((torch.from_numpy(a['t2'])[np.newaxis, np.newaxis, :] - 0.5) /
      0.5).float().cuda()
## load model
model_path = './weight/generator_t2_tumor.pth'
gen = define_G(
    4,           # input_nc: 1 image channel + 3-channel modality code
    1,           # output_nc
    64,          # ngf
    'unet_128',  # generator architecture
    norm='instance',
)
gen.load_state_dict(torch.load(model_path))
gen.cuda()
gen.eval()

## predict flair using t2
c = torch.zeros(1, 3).cuda()     # 3-way one-hot code for the target modality
c[np.arange(t2.size(0)), 0] = 1  # index 0 selects FLAIR
f_pred = (gen(t2, c).squeeze().data.cpu().numpy() + 1) / 2  # back to [0, 1]

## predict t1ce using t2
c = torch.zeros(1, 3).cuda()
c[np.arange(t2.size(0)), 1] = 1  # index 1 selects T1ce
t1ce_pred = (gen(t2, c).squeeze().data.cpu().numpy() + 1) / 2
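
The snippet imports matplotlib but never calls it; a minimal sketch of how the two predictions could be displayed side by side (assuming f_pred and t1ce_pred are the 2-D arrays computed above):

## visualize the two synthesized modalities (illustrative sketch)
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].imshow(f_pred, cmap='gray')
axes[0].set_title('predicted FLAIR')
axes[1].imshow(t1ce_pred, cmap='gray')
axes[1].set_title('predicted T1ce')
for ax in axes:
    ax.axis('off')
plt.show()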
Example #2
    './size_64/pix2pix/',
    './size_64/pix2pix_softmax/',
    #'./size_64/Wpix2pix/',
    #'./size_64/Wpix2pix_softmax/',
    './size_64/WUnet_softmax/',
    './size_64/Wpix2pix_path/',
    # './size_64/Wpix2pix_ppath/',
    # './size_64/Wpix2pix_ppath_len/',
]

models = [
    define_G(channels,   # input_nc
             channels,   # output_nc: image-to-image (plain pix2pix)
             64,         # ngf
             'batch',    # norm layer
             False,      # use_dropout
             'normal',   # init_type
             0.02,       # init_gain
             gpu_id=device,
             use_ce=False,
             unet=False),
    define_G(channels,     # input_nc
             num_classes,  # output_nc: per-class logits (softmax variant)
             64,
             'batch',
             False,
             'normal',
             0.02,
             gpu_id=device,
             use_ce=True,
             unet=False),
    #define_G(channels, channels, 64, 'batch', False, 'normal', 0.02, gpu_id=device, use_ce=False, unet=False),
Example #3
    def __init__(self, opt):
        """Pix2PixHD model

        Parameters
        ----------
        opt : argparse.Namespace
            Options for this model, e.g. gain, isAffine.

        """
        super(Pix2PixHDModel, self).__init__()
        self.opt = opt

        if opt.gpu_ids == 0:
            self.device = torch.device("cuda:0")
        elif opt.gpu_ids == 1:
            self.device = torch.device("cuda:1")
        else:
            self.device = torch.device("cpu")

        # define the networks (generator, discriminator, encoder)
        input_nc = opt.label_num
        if not opt.no_use_feature:
            input_nc += opt.feature_nc
        if not opt.no_use_edge:
            input_nc += 1
        self.netG = define_G(
            input_nc=input_nc,
            output_nc=opt.output_nc,
            ngf=opt.ngf,
            g_type=opt.g_type,
            device=self.device,
            isAffine=opt.isAffine,
            use_relu=opt.use_relu,
        )

        input_nc = opt.output_nc
        if not opt.no_use_edge:
            input_nc += opt.label_num + 1
        else:
            input_nc += opt.label_num
        self.netD = define_D(
            input_nc=input_nc,
            ndf=opt.ndf,
            n_layers_D=opt.n_layers_D,
            device=self.device,
            isAffine=opt.isAffine,
            num_D=opt.num_D,
        )

        self.netE = define_E(
            input_nc=opt.output_nc,
            feat_num=opt.feature_nc,
            nef=opt.nef,
            device=self.device,
            isAffine=opt.isAffine,
        )

        # define the optimizers
        # initialize optimizer for G & E
        # if opt.niter_fix_global > 0, freeze the global generator and train
        # only the local enhancer for that many epochs
        if opt.niter_fix_global > 0:
            finetune_list = set()

            params = []
            for key, value in self.netG.named_parameters():
                if key.startswith("model" + str(opt.n_local_enhancers)):
                    params += [value]
                    finetune_list.add(key.split(".")[0])
            print(
                "------------- Only training the local enhancer network (for %d epochs) ------------"
                % opt.niter_fix_global)
            print("The layers that are finetuned are ", sorted(finetune_list))
        else:
            params = list(self.netG.parameters())
        if not self.opt.no_use_feature:
            params += list(self.netE.parameters())
        self.optimizer_G = torch.optim.Adam(params,
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.scheduler_G = LinearDecayLR(self.optimizer_G,
                                         niter_decay=opt.niter_decay)

        # initialize optimizer D
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.scheduler_D = LinearDecayLR(self.optimizer_D,
                                         niter_decay=opt.niter_decay)

        # define loss functions
        if opt.gpu_ids == 0 or opt.gpu_ids == 1:
            self.Tensor = torch.cuda.FloatTensor
        else:
            self.Tensor = torch.FloatTensor

        self.criterionGAN = GANLoss(self.device,
                                    use_lsgan=not opt.no_lsgan,
                                    tensor=self.Tensor)
        if not self.opt.no_fmLoss:
            self.criterionFM = FMLoss(num_D=opt.num_D,
                                      n_layers=opt.n_layers_D,
                                      lambda_feat=opt.lambda_feat)
        if not self.opt.no_pLoss:
            self.criterionP = PerceptualLoss(
                self.device, lambda_perceptual=opt.lambda_perceptual)
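
For reference, a hypothetical instantiation of this class. Only the field names are taken from the constructor above; every value is an illustrative assumption, not a documented default:

from argparse import Namespace

# hypothetical options; the names match what __init__ reads, the values are guesses
opt = Namespace(
    gpu_ids=0, label_num=35, feature_nc=3, output_nc=3,
    no_use_feature=False, no_use_edge=False,
    ngf=64, g_type='global', isAffine=True, use_relu=True,
    ndf=64, n_layers_D=3, num_D=2, nef=16,
    niter_fix_global=0, n_local_enhancers=1,
    lr=0.0002, beta1=0.5, niter_decay=100,
    no_lsgan=False, no_fmLoss=False, lambda_feat=10.0,
    no_pLoss=False, lambda_perceptual=10.0,
)
model = Pix2PixHDModel(opt)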
Example #4
def train(img_size=64, channels=1, num_classes=3, batch_size=32,
          dataset_dir='./size_64/20_den/', result_folder='./size_64/Wpix2pix_ppath/',
          epoch_count=1, niter=100, niter_decay=100, lr_decay_iters=50):

    os.makedirs(result_folder, exist_ok=True)

    # Dataset loader
    training_data_loader = DataLoader(ImageDataset(dataset_dir, img_size=img_size),
                                      batch_size=batch_size, shuffle=True)
    testing_data_loader = DataLoader(ImageDataset(dataset_dir, mode='val', img_size=img_size),
                                     batch_size=6, shuffle=True, num_workers=1)

    gpu_id = 'cuda:3'
    device = torch.device(gpu_id)

    print('===> Building models')
    net_g = define_G(channels, num_classes, 64, 'batch', False, 'normal', 0.02, gpu_id=gpu_id, use_ce=True, ce=False,
                     unet=False)
    net_d = define_D(channels, 64, 'basic', gpu_id=gpu_id)

    weight = torch.FloatTensor([1, 1, 1]).to(device)

    criterionGAN = GANLoss().to(device)
    criterionL1 = nn.L1Loss().to(device)
    criterionMSE = nn.MSELoss().to(device)
    criterionCE = nn.CrossEntropyLoss(weight=weight).to(device)

    lr = 0.0002
    beta1 = 0.5
    lr_policy = 'lambda'

    # setup optimizer
    optimizer_g = optim.Adam(net_g.parameters(), lr=lr, betas=(beta1, 0.999))
    optimizer_d = optim.Adam(net_d.parameters(), lr=lr, betas=(beta1, 0.999))
    net_g_scheduler = get_scheduler(optimizer_g, lr_policy)
    net_d_scheduler = get_scheduler(optimizer_d, lr_policy)

    loss_history = {'G': [], 'D': [], 'p': [], 'adv': [], 'valPSNR': []}

    for epoch in range(epoch_count, niter + niter_decay + 1):
        # train
        for iteration, batch in enumerate(training_data_loader, 1):
            # forward
            real_a, real_b, path = batch[0].to(device), batch[1].to(device), batch[2].to(device)
            # imshow(torch.cat((real_a[0], real_b[0]), -1).cpu().detach().numpy().reshape(img_size, img_size * 2))
            # imshow(real_b[0].cpu().detach().numpy().reshape(img_size, img_size))

            output = net_g(real_a)

            # fake_b = output
            fake_b = torch.max(output, 1, keepdim=True)[1].float()
            fake_path = torch.where(fake_b == 0, torch.ones_like(fake_b).to(device),
                                    torch.zeros_like(fake_b).to(device)).to(device)

            ######################
            # (1) Update D network
            ######################

            optimizer_d.zero_grad()

            # train with fake
            # fake_ab = torch.cat((real_a, fake_b), 1)
            # fake_ab = torch.cat((real_a, fake_path), 1)
            # pred_fake = net_d.forward(fake_ab.detach())
            # pred_fake = net_d.forward(fake_b.detach())

            pred_fake = net_d.forward(fake_path.detach())
            loss_d_fake = criterionGAN(pred_fake, False)

            # train with real
            # real_ab = torch.cat((real_a, real_b), 1)
            # real_ab = torch.cat((real_a, path), 1)
            # pred_real = net_d.forward(real_ab)
            # pred_real = net_d.forward(real_b)

            pred_real = net_d.forward(path)
            loss_d_real = criterionGAN(pred_real, True)

            # Combined D loss
            loss_d = (loss_d_fake + loss_d_real) * 0.5
            loss_d.backward()

            # gradient_penalty = calculate_gradient_penalty(net_d, real_a.data, real_b.data, fake_b.data)
            gradient_penalty = calculate_gradient_penalty(net_d, real_a.data, path.data, fake_path.data)
            gradient_penalty.backward()

            optimizer_d.step()

            ######################
            # (2) Update G network
            ######################

            optimizer_g.zero_grad()

            # First, G(A) should fake the discriminator
            # fake_ab = torch.cat((real_a, fake_b), 1)
            # fake_ab = torch.cat((real_a, fake_path), 1)
            # pred_fake = net_d.forward(fake_ab)
            # pred_fake = net_d.forward(fake_b)

            pred_fake = net_d.forward(fake_path)
            loss_g_gan = criterionGAN(pred_fake, True)

            # Second, G(A) = B
            loss_g_l1 = criterionL1(fake_b, real_b)
            loss_g_ce = criterionCE(output, real_b[:, 0, ...].long()) * 10
            loss_len = (torch.mean(path) - torch.mean(fake_path)).pow(2)
            loss_g = loss_g_gan + loss_g_ce # + loss_len

            loss_g.backward()

            optimizer_g.step()


            # print("===> Epoch[{}]({}/{}): Loss_D: {:.4f} Loss_G: {:.4f}".format(epoch,
            #                                                                     iteration,
            #                                                                     len(training_data_loader),
            #                                                                     loss_d.item(),
            #                                                                     loss_g.item()))

            loss_history['D'].append(loss_d.item())
            loss_history['G'].append(loss_g.item())
            loss_history['p'].append(loss_g_l1.item())

            # if iteration % 50 == 0:
            #     clear_output(True)
            #     plt.figure(figsize=[6, 4])
            #     plt.title("G vs D losses over time")
            #     plt.plot(loss_history['D'], linewidth=2, label='Discriminator')
            #     plt.plot(loss_history['G'], linewidth=2, label='Generator')
            #     plt.legend()
            #     plt.show()

        update_learning_rate(net_g_scheduler, optimizer_g)
        update_learning_rate(net_d_scheduler, optimizer_d)

        # test
        avg_psnr = 0
        for batch in testing_data_loader:
            input, target, mask = batch[0].to(device), batch[1].to(device), batch[2].to(device)

            output = net_g(input)
            # prediction = output
            prediction = torch.max(output, 1, keepdim=True)[1].float()
            mse = criterionMSE(prediction, target)
            psnr = 10 * log10(1 / (mse.item() + 1e-16))
            avg_psnr += psnr

        loss_history['valPSNR'] += [avg_psnr / len(testing_data_loader)]
        # print(len(testing_data_loader))
        print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr / len(testing_data_loader)))

        # checkpoint
        save_sample(epoch * len(training_data_loader) + iteration, testing_data_loader, dataset_dir, result_folder)
        torch.save(net_g.state_dict(), result_folder + 'generator.pt')
        torch.save(net_d.state_dict(), result_folder + 'discriminator.pt')
        np.save(result_folder + 'loss_history.npy', loss_history)
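
get_scheduler and update_learning_rate come from the project's utilities and are not shown here. In the reference pix2pix code the 'lambda' policy keeps the learning rate constant for niter epochs and then decays it linearly to zero over niter_decay epochs; a sketch of that behaviour, offered as an assumption about what these helpers do:

from torch.optim.lr_scheduler import LambdaLR

def get_scheduler(optimizer, lr_policy, epoch_count=1, niter=100, niter_decay=100):
    # sketch of the classic pix2pix 'lambda' policy: constant LR for `niter`
    # epochs, then linear decay to zero over `niter_decay` epochs
    assert lr_policy == 'lambda'
    def lambda_rule(epoch):
        return 1.0 - max(0, epoch + epoch_count - niter) / float(niter_decay + 1)
    return LambdaLR(optimizer, lr_lambda=lambda_rule)

def update_learning_rate(scheduler, optimizer):
    # step once per epoch and report the new learning rate
    scheduler.step()
    print('learning rate = %.7f' % optimizer.param_groups[0]['lr'])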
Example #5
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from dataset import GraspingDataset
from loss import GANLoss
from model import define_D, define_G

data_dir = "/content/drive/My Drive/Grasping GAN/processed"
model_dir = "/content/drive/My Drive/Grasping GAN/models"
batch_size = 8
epochs = 1
lr = 0.01

dataset = GraspingDataset(data_dir)
data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

net_g = define_G(3, 3, 64, "batch", False, "normal", 0.02, gpu_id=device)
net_d = define_D(3 + 3, 64, "basic", gpu_id=device)  # D sees the (input, output) pair: 3 + 3 channels

criterionGAN = GANLoss().to(device)
criterionL1 = nn.L1Loss().to(device)
criterionMSE = nn.MSELoss().to(device)

optimizer_g = optim.Adam(net_g.parameters(), lr=lr)
optimizer_d = optim.Adam(net_d.parameters(), lr=lr)

l1_weight = 10

for epoch in range(epochs):
    # train
    for iteration, batch in enumerate(data_loader, 1):
        # forward
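
The example is cut off at this point. A hypothetical continuation of the loop body, consistent with the networks and losses set up above (net_d takes the concatenated 3 + 3-channel (input, output) pair, and l1_weight is the variable defined earlier); this is a sketch of the standard pix2pix step, not the author's original code:

        real_a, real_b = batch[0].to(device), batch[1].to(device)
        fake_b = net_g(real_a)

        # update D: real pairs vs. detached fake pairs
        optimizer_d.zero_grad()
        pred_fake = net_d(torch.cat((real_a, fake_b), 1).detach())
        loss_d_fake = criterionGAN(pred_fake, False)
        pred_real = net_d(torch.cat((real_a, real_b), 1))
        loss_d_real = criterionGAN(pred_real, True)
        loss_d = (loss_d_fake + loss_d_real) * 0.5
        loss_d.backward()
        optimizer_d.step()

        # update G: fool D while staying close to the target in L1
        optimizer_g.zero_grad()
        pred_fake = net_d(torch.cat((real_a, fake_b), 1))
        loss_g = criterionGAN(pred_fake, True) + criterionL1(fake_b, real_b) * l1_weight
        loss_g.backward()
        optimizer_g.step()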
Example #6
        length += np.sqrt((start_x - x) ** 2 + (start_y - y) ** 2)
    return length

img_size = 32
channels = 1
num_classes = 3
result_folder = './validation/ours_64_free_all_tests/'
dataset_dir = './dataset/ours_64_free_/'
device = torch.device("cuda:1")

os.makedirs(result_folder, exist_ok=True)

result_folders = ['./dataset/ours_64_free__results20_den/']

models = [
          define_G(channels, num_classes, img_size, 'batch', False, 'normal', 0.02, gpu_id=device, use_ce=True, unet=False, attn=True)
          ]

for model, path in zip(models, result_folders):
    model.load_state_dict(torch.load(path + 'generator.pt'))

batch_size = 6

val_data_loader = DataLoader(ImageDataset(dataset_dir, mode='eval', img_size=img_size),
                             batch_size=1, shuffle=False, num_workers=1)


focal_astar = {1 : {'nodes': [], 'steps': [], 'length': []},
               1.5 : {'nodes': [], 'steps': [], 'length': []},
               2 : {'nodes': [], 'steps': [], 'length': []},
               5 : {'nodes': [], 'steps': [], 'length': []},