Code Example #1
File: mainMiccaiSeg.py Project: shunkaiyu/segnet
def validate(val_loader, model, criterion, epoch, key, evaluator):
    '''
        Run evaluation
    '''

    # Switch to evaluate mode
    model.eval()

    for i, (img, gt) in enumerate(val_loader):

        # Process the network inputs and outputs
        img = utils.normalize(img, torch.Tensor([0.295, 0.204, 0.197]), torch.Tensor([0.221, 0.188, 0.182]))
        gt_temp = gt * 255
        label = utils.generateLabel4CE(gt_temp, key)
        oneHotGT = utils.generateOneHot(gt_temp, key)

        img, label = Variable(img), Variable(label)

        if use_gpu:
            img = img.cuda()
            label = label.cuda()

        # Compute output
        seg = model(img)
        loss = model.dice_loss(seg, label)

        print('[%d/%d][%d/%d] Loss: %.4f'
              % (epoch, args.epochs-1, i, len(val_loader)-1, loss.mean().item()))

        utils.displaySamples(img, seg, gt, use_gpu, key, args.saveTest, epoch,
                             i, args.save_dir)
        evaluator.addBatch(seg, oneHotGT)
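
The per-channel mean/std tensors passed to utils.normalize suggest ordinary channel-wise standardization of an NCHW batch. For reference, here is a minimal sketch of what that helper presumably does; the broadcasting layout is an assumption, not the project's actual code:

import torch

def normalize(batch, mean, std):
    # Hypothetical stand-in for utils.normalize: subtract the dataset
    # mean and divide by the dataset std, channel by channel (NCHW).
    return (batch - mean.view(1, -1, 1, 1)) / std.view(1, -1, 1, 1)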
Code Example #2
def train(train_loader, model, criterion, optimizer, scheduler, epoch, key):
    '''
        Run one training epoch
    '''

    # Switch to train mode
    model.train()

    for i, (img, seg_gt, class_gt) in enumerate(train_loader):

        # For TenCrop Data Augmentation
        img = img.view(-1, 3, args.resizedImageSize, args.resizedImageSize)
        img = utils.normalize(img, torch.Tensor([0.295, 0.204, 0.197]),
                              torch.Tensor([0.221, 0.188, 0.182]))
        seg_gt = seg_gt.view(-1, 3, args.resizedImageSize,
                             args.resizedImageSize)

        # Process the network inputs and outputs
        gt_temp = seg_gt * 255
        seg_label = utils.generateLabel4CE(gt_temp, key)

        # Replicate the classification target 10x to match the TenCrop batch
        class_label = class_gt
        for _ in range(9):
            class_label = torch.cat((class_label, class_gt), 0)

        img, seg_label, class_label = Variable(img), Variable(
            seg_label), Variable(class_label).float()

        if use_gpu:
            img = img.cuda()
            seg_label = seg_label.cuda()
            class_label = class_label.cuda()

        # Compute output
        classified, segmented = model(img)
        seg_loss = model.dice_loss(segmented, seg_label)
        class_loss = criterion(classified, class_label)
        total_loss = seg_loss + class_loss

        # Compute gradient and do SGD step
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        scheduler.step(total_loss.mean().item())

        print(
            '[{:d}/{:d}][{:d}/{:d}] Total Loss: {:.4f}, Segmentation Loss: {:.4f}, Classification Loss: {:.4f}'
            .format(epoch, args.epochs - 1, i,
                    len(train_loader) - 1,
                    total_loss.mean().item(),
                    seg_loss.mean().item(),
                    class_loss.mean().item()))

        utils.displaySamples(img, segmented, seg_gt, use_gpu, key, False,
                             epoch, i, args.save_dir)
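
Every segmentation example here calls model.dice_loss(seg, label) on raw network outputs together with the index map produced by generateLabel4CE. A minimal soft-Dice sketch consistent with that usage; the repository's exact formulation may differ:

import torch
import torch.nn.functional as F

def dice_loss(logits, target, eps=1e-6):
    # Soft Dice between softmaxed NxCxHxW logits and a one-hot encoding
    # of NxHxW integer class labels, averaged over the classes.
    probs = F.softmax(logits, dim=1)
    one_hot = F.one_hot(target.long(), num_classes=probs.size(1))
    one_hot = one_hot.permute(0, 3, 1, 2).float()
    dims = (0, 2, 3)
    intersection = (probs * one_hot).sum(dims)
    cardinality = probs.sum(dims) + one_hot.sum(dims)
    return 1.0 - (2.0 * intersection / (cardinality + eps)).mean()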
Code Example #3
File: main.py Project: semchan/capsNet
def train(train_loader, model, optimizer, epoch, key):
    '''
        Run one training epoch
    '''
    model.train()
    for i, (data, target) in enumerate(train_loader):

        # Generate the target vector from the groundtruth image
        # Multiplication by 255 to convert from float to uint8
        target_temp = target * 255
        label = utils.generateGTmask(target_temp, key)
        if args.verbose:
            print(torch.max(label))

        if use_gpu:
            data = data.cuda()
            label = label.cuda()

        data, label = Variable(data), Variable(label, requires_grad=False)
        label = label.float()
        optimizer.zero_grad()
        if args.with_reconstruction:
            output, probs = model(data, label)
            loss = F.mse_loss(output, label)
            # margin_loss = loss_fn(probs, target)
            # loss = reconstruction_alpha * reconstruction_loss + margin_loss
        else:
            # Only the reconstruction path is implemented; without it,
            # output and loss below would be undefined
            raise NotImplementedError('train() requires --with_reconstruction')

        if args.verbose:
            print(output[0, 3000:3020])
            print(label[0, 3000:3020])

        loss.backward()
        optimizer.step()

        print('[%d/%d][%d/%d] Loss: %.4f' %
              (epoch, args.epochs, i, len(train_loader), loss.mean().item()))
        if i % args.print_freq == 0:
            utils.displaySamples(data, output, target, use_gpu, key)
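
The commented-out margin_loss above refers to the class-presence loss from the CapsNet paper (Sabour et al., 2017), where probs are the per-class capsule lengths. The body below is a sketch of that published formula, not the repository's actual loss_fn:

import torch

def margin_loss(probs, target, m_pos=0.9, m_neg=0.1, lam=0.5):
    # L_k = T_k * max(0, m+ - ||v_k||)^2
    #     + lam * (1 - T_k) * max(0, ||v_k|| - m-)^2
    # target is expected to be one-hot over the classes.
    pos = target * torch.clamp(m_pos - probs, min=0) ** 2
    neg = lam * (1.0 - target) * torch.clamp(probs - m_neg, min=0) ** 2
    return (pos + neg).sum(dim=1).mean()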
Code Example #4
def train(train_loader, model, criterion, optimizer, scheduler, epoch, key):
    '''
        Run one training epoch
    '''

    # Switch to train mode
    model.train()
    epoch_loss = 0
    for i, (img, gt) in enumerate(train_loader):

        # For TenCrop Data Augmentation
        img = img.view(-1, 3, args.resizedImageSize, args.resizedImageSize)
        img = utils.normalize(img, torch.Tensor([0.295, 0.204, 0.197]),
                              torch.Tensor([0.221, 0.188, 0.182]))
        gt = gt.view(-1, 3, args.resizedImageSize, args.resizedImageSize)

        # Process the network inputs and outputs
        gt_temp = gt * 255
        label = utils.generateLabel4CE(gt_temp, key)
        oneHotGT = utils.generateOneHot(gt_temp, key)

        img, label = Variable(img), Variable(label)

        if use_gpu:
            img = img.cuda()
            label = label.cuda()

        # Compute output
        seg = model(img)
        loss = model.dice_loss(seg, label)

        # Compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        scheduler.step(loss.mean().item())
        epoch_loss += loss.mean().item()

        print('[%d/%d][%d/%d] Loss: %.4f'
              % (epoch, args.epochs-1, i, len(train_loader)-1, loss.mean().item()))
        utils.displaySamples(img, seg, gt, use_gpu, key, False, epoch,
                             i, args.save_dir)

    writer.add_scalar('Train Epoch Loss', epoch_loss / len(train_loader), epoch)
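
Because train() passes the batch loss into scheduler.step(), the scheduler must be metric-driven, e.g. ReduceLROnPlateau, rather than a fixed schedule. A hypothetical wiring of the pieces these functions expect; the optimizer choice and hyperparameters are assumptions:

import torch.optim as optim

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# ReduceLROnPlateau lowers the learning rate when the metric it is
# stepped with stops improving.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                 factor=0.1, patience=5)

for epoch in range(args.epochs):
    train(train_loader, model, criterion, optimizer, scheduler, epoch, key)
    validate(val_loader, model, criterion, epoch, key, evaluator)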
Code Example #5
def validate(val_loader, model, criterion, epoch, key, evaluator):
    '''
        Run evaluation
    '''

    # Switch to evaluate mode
    model.eval()

    for i, (img, seg_gt, class_gt) in enumerate(val_loader):

        # Process the network inputs and outputs
        img = utils.normalize(img, torch.Tensor([0.295, 0.204, 0.197]),
                              torch.Tensor([0.221, 0.188, 0.182]))
        gt_temp = seg_gt * 255
        seg_label = utils.generateLabel4CE(gt_temp, key)
        oneHotGT = utils.generateOneHot(gt_temp, key)

        img, seg_label, class_label = Variable(img), Variable(
            seg_label), Variable(class_gt).float()

        if use_gpu:
            img = img.cuda()
            seg_label = seg_label.cuda()
            class_label = class_label.cuda()

        # Compute output
        classified, segmented = model(img)
        seg_loss = model.dice_loss(segmented, seg_label)
        class_loss = criterion(classified, class_label)
        total_loss = seg_loss + class_loss

        print(
            '[{:d}/{:d}][{:d}/{:d}] Total Loss: {:.4f}, Segmentation Loss: {:.4f}, Classification Loss: {:.4f}'
            .format(epoch, args.epochs - 1, i,
                    len(val_loader) - 1,
                    total_loss.mean().item(),
                    seg_loss.mean().item(),
                    class_loss.mean().item()))

        utils.displaySamples(img, segmented, seg_gt, use_gpu, key,
                             args.saveTest, epoch, i, args.save_dir)
        evaluator.addBatch(segmented, oneHotGT)
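
generateLabel4CE and generateOneHot both receive the ground truth scaled back to 0-255 along with key, which suggests the masks are colour-coded and key maps each class to a colour. A sketch of the index-map conversion under that assumption; the real helper may differ:

import torch

def generateLabel4CE(gt, key):
    # gt: Nx3xHxW colour-coded mask with values in 0..255
    # key: assumed to be a {class_index: (r, g, b)} mapping
    n, _, h, w = gt.shape
    label = torch.zeros(n, h, w, dtype=torch.long)
    for idx, colour in key.items():
        colour = torch.tensor(colour, dtype=gt.dtype).view(1, 3, 1, 1)
        label[(gt == colour).all(dim=1)] = idx
    return label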
Code Example #6
def train(train_loader, netG, netG2, netD, criterion, criterion_L1, optimizerG,
          optimizerG2, optimizerD, epoch, input, noise, fixed_noise, label,
          nz):
    '''
        Run one training epoch
    '''

    for i, (img, gt) in enumerate(train_loader):

        # Generate smoothed labels
        if args.labelSmoothing:
            real_label = random.uniform(0.7, 1.2)
            fake_label = random.uniform(0.0, 0.3)
        else:
            real_label = 1
            fake_label = 0

        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real
        gtForViz = gt
        if args.verbose:
            print('GT shape')
            print(gt.shape)
        if use_gpu:
            img = img.cuda()
            gt = gt.cuda()
        netD.zero_grad()
        real_cpu = img
        batch_size = real_cpu.size(0)
        if use_gpu:
            real_cpu = real_cpu.cuda()
        input.resize_as_(real_cpu).copy_(real_cpu)
        label.resize_(batch_size).fill_(real_label)
        inputv = Variable(input)
        labelv = Variable(label)

        output = netD(inputv)
        if args.verbose:
            print('Output size - real: ')
            print(output.data.shape)
        errD_real = criterion(output, labelv)
        errD_real.backward()
        D_x = output.data.mean()

        # train with fake
        if args.inputType == 'segM':
            noise = gt
            if args.verbose:
                print('GT noise shape')
                print(gt.shape)
            if args.verbose:
                print('Noise Reshaped: ')
                print(noise.shape)
        else:
            noise = torch.randn(batch_size, 3, args.imageSize, args.imageSize)

        netG.zero_grad()
        if use_gpu:
            noise = noise.cuda()
        noisev = Variable(noise)
        fake = netG(noisev)
        if args.verbose:
            print('Fake img size: ')
            print(fake.data.shape)
        labelv = Variable(label.fill_(fake_label))
        output = netD(fake.detach())
        if args.verbose:
            print('Output size - fake: ')
            print(output.data.shape)
        if args.useL1:
            # L1 between the generated image and the real input batch
            errL1 = criterion_L1(fake.detach(), inputv)
            errD_fake = criterion(output, labelv) + args.Lambda * errL1
        else:
            errD_fake = criterion(output, labelv)
        errD_fake.backward()
        D_G_z1 = output.data.mean()
        # errL1 is already folded into errD_fake above, so don't add it twice
        errD = errD_real + errD_fake
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        # Run the 2nd G network to get back the segmentation mask
        netG2.zero_grad()
        genSeg = netG2(fake)
        # Compare the recovered mask against the actual ground truth; keeping
        # the graph attached lets errG.backward() train netG2 as well
        errL1G2 = criterion_L1(genSeg, Variable(gt))

        netG.zero_grad()
        labelv = Variable(
            label.fill_(real_label))  # fake labels are real for generator cost
        output = netD(fake)
        if args.useL1:
            errG = criterion(output, labelv) + args.Lambda * (errL1 + errL1G2)
        else:
            errG = criterion(output, labelv)
        errG.backward()
        D_G_z2 = output.data.mean()
        optimizerG.step()
        optimizerG2.step()

        if args.useL1:
            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f Loss_L1: %.4f Loss_L1G2: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, args.epochs, i, len(train_loader), errD.item(),
                   errG.item(), errL1.item(), errL1G2.item(), D_x, D_G_z1,
                   D_G_z2))
        else:
            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, args.epochs, i, len(train_loader), errD.item(),
                   errG.item(), D_x, D_G_z1, D_G_z2))
        if i % args.print_freq == 0:
            vutils.save_image(real_cpu,
                              '%s/real_samples.png' % args.save_dir,
                              normalize=True)
            #fake = netG(fixed_noise)
            vutils.save_image(fake.data,
                              '%s/fake_samples_epoch_%03d.png' %
                              (args.save_dir, epoch),
                              normalize=True)
            utils.displaySamples(real_cpu, fake, genSeg, gtForViz, use_gpu)
Code Example #7
def train(train_loader, netG, netD, criterion, optimizerG, optimizerD, epoch,
          input, noise, fixed_noise, label, real_label, fake_label, nz):
    '''
        Run one training epoch
    '''

    for i, (img, gt) in enumerate(train_loader):

        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real
        gtForViz = gt
        gt = gt.view(-1, 1).squeeze(1)
        if args.verbose:
            print('GT shape')
            print(gt.shape)
        if use_gpu:
            img = img.cuda()
            gt = gt.cuda()
        netD.zero_grad()
        real_cpu = img
        batch_size = real_cpu.size(0)
        if use_gpu:
            real_cpu = real_cpu.cuda()
        input.resize_as_(real_cpu).copy_(real_cpu)
        label.resize_(batch_size).fill_(real_label)
        inputv = Variable(input)
        labelv = Variable(label)

        output = netD(inputv)
        if args.verbose:
            print('Output size - real: ')
            print(output.data.shape)
        errD_real = criterion(output, labelv)
        errD_real.backward()
        D_x = output.data.mean()

        # train with fake
        noise = gt
        if args.verbose:
            print('GT noise shape')
            print(gt.shape)
        noiseForViz = noise.resize_(batch_size, nz, 1, 1)
        noise.normal_(0, 1)
        if args.verbose:
            print('Noise Reshaped: ')
            print(noise.shape)
        noisev = Variable(noise)
        fake = netG(noisev)
        if args.verbose:
            print('Fake img size: ')
            print(fake.data.shape)
        labelv = Variable(label.fill_(fake_label))
        output = netD(fake.detach())
        if args.verbose:
            print('Output size - fake: ')
            print(output.data.shape)
        errD_fake = criterion(output, labelv)
        errD_fake.backward()
        D_G_z1 = output.data.mean()
        errD = errD_real + errD_fake
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        labelv = Variable(
            label.fill_(real_label))  # fake labels are real for generator cost
        output = netD(fake)
        errG = criterion(output, labelv)
        errG.backward()
        D_G_z2 = output.data.mean()
        optimizerG.step()

        print(
            '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
            % (epoch, args.epochs, i, len(train_loader), errD.item(),
               errG.item(), D_x, D_G_z1, D_G_z2))
        if i % args.print_freq == 0:
            vutils.save_image(real_cpu,
                              '%s/real_samples.png' % args.save_dir,
                              normalize=True)
            #fake = netG(fixed_noise)
            vutils.save_image(fake.data,
                              '%s/fake_samples_epoch_%03d.png' %
                              (args.save_dir, epoch),
                              normalize=True)
            utils.displaySamples(real_cpu, fake, noiseForViz, gtForViz,
                                 use_gpu)
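
Both GAN examples follow the stock DCGAN training script, with input, noise, fixed_noise, and label pre-allocated outside the loop and reused through resize_/copy_. A hypothetical setup for those buffers; the latent size and argument names are assumptions:

import torch

nz = 100  # latent vector size (assumption)
input = torch.FloatTensor(args.batchSize, 3, args.imageSize, args.imageSize)
noise = torch.FloatTensor(args.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(args.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(args.batchSize)
real_label, fake_label = 1, 0

if use_gpu:
    input, label = input.cuda(), label.cuda()
    noise, fixed_noise = noise.cuda(), fixed_noise.cuda()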