Example #1
    def train_epoch(epoch):
        epoch_loss_g = 0
        total_psnr = 0
        for i, batch in enumerate(training_data_loader, 1):
            if i > max_train_iter:
                break
            target = Variable(batch[0])
            input = Variable(batch[1])
            if cuda:
                target = target.cuda()
                input = input.cuda()

            # Train Generator
            optimizerG.zero_grad()
            reconstructed = gennet(input)

            content_loss = content_criterion(reconstructed, target)

            loss = content_loss
            loss.backward()
            optimizerG.step()

            epoch_loss_g += loss.data[0]
            total_psnr += psnr(loss.data[0])

            print("Epoch[{}]({}/{}): Loss(G): {:.4f}".format(
                epoch, i, max_train_iter, loss.data[0]))
        avg_loss_g = epoch_loss_g / max_train_iter
        avg_psnr = total_psnr / max_train_iter

        print("Epoch {} : Avg Loss(G): {:.4f}".format(epoch, avg_loss_g))
        plots.update([epoch, avg_psnr], rind=1, cind=1, lind=1)
        plots.update([epoch, avg_loss_g], rind=1, cind=2, lind=1)
        plots.save()
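The psnr helper used in these examples is defined elsewhere in each repository. For the MSE-based calls such as psnr(loss.data[0]) above, a minimal sketch of such a helper (an assumption, not the repositories' actual implementation; it presumes pixel values scaled to [0, 1]) could look like:

import math

def psnr_from_mse(mse, peak=1.0):
    # Hypothetical helper: PSNR in dB computed from a scalar MSE, assuming
    # pixel values lie in [0, peak]. Not taken from any repository above.
    if mse <= 0:
        return float('inf')
    return 10.0 * math.log10(peak ** 2 / mse)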
Example #2
    def validate(epoch):
        epoch_loss_g = 0
        total_psnr = 0
        for i, batch in enumerate(validation_data_loader, 1):
            if i > max_val_iter:
                break
            target = Variable(batch[0])
            input = Variable(batch[1])
            if cuda:
                target = target.cuda()
                input = input.cuda()

            # Forward pass for Generator
            reconstructed = gennet(input)

            content_loss = content_criterion(reconstructed, target)

            loss = content_loss

            epoch_loss_g += loss.data[0]
            total_psnr += psnr(loss.data[0])

            print("Epoch[{}]({}/{}): Loss(G): {:.4f}".format(
                epoch, i, max_val_iter, loss.data[0]))
        avg_loss_g = epoch_loss_g / max_val_iter
        avg_psnr = total_psnr / max_val_iter

        print("Epoch {} : Avg Loss(G): {:.4f}".format(epoch, avg_loss_g))
        plots.update([epoch, avg_psnr], rind=1, cind=1, lind=2)
        plots.update([epoch, avg_loss_g], rind=1, cind=2, lind=2)
        plots.save()
Example #3
    def validate(epoch):
        epoch_loss_g = 0
        epoch_loss_d = 0
        total_psnr = 0
        for i, batch in enumerate(validation_data_loader, 1):
            real_batch_size = batch[0].size(0)
            label.data.resize_(real_batch_size)
            target = Variable(batch[0])
            input = Variable(batch[1])
            if cuda:
                target = target.cuda()
                input = input.cuda()

            # Forward pass for Generator
            reconstructed = gennet(input)

            feature_r = vgg(reconstructed)
            feature_t = vgg(target)

            content_loss = content_criterion(feature_r, feature_t.detach())

            label.data.fill_(real_label)
            adversarial_loss = adversarial_criterion(disnet(reconstructed),
                                                     label)

            loss = content_loss + 1e-3 * adversarial_loss

            # Forward pass for Discriminator
            reconstructed = gennet(input)

            label.data.fill_(fake_label)
            fake_loss = adversarial_criterion(disnet(reconstructed.detach()),
                                              label)

            label.data.fill_(real_label)
            real_loss = adversarial_criterion(disnet(target), label)

            epoch_loss_g += loss.data[0]
            epoch_loss_d += fake_loss.data[0] + real_loss.data[0]
            total_psnr += psnr(
                content_criterion(reconstructed, target).data[0])

            print("Epoch[{}]({}/{}): Loss(G): {:.4f} Loss(D): {:.4f}".format(
                epoch, i, len(validation_data_loader), loss.data[0],
                fake_loss.data[0] + real_loss.data[0]))
        avg_loss_g = epoch_loss_g / len(validation_data_loader)
        avg_loss_d = epoch_loss_d / len(validation_data_loader)
        avg_psnr = total_psnr / len(validation_data_loader)

        print("Epoch {} : Avg Loss(G): {:.4f} Avg Loss(D): {:.4f}".format(
            epoch, avg_loss_g, avg_loss_d))
        plots.update([epoch, avg_psnr], rind=1, cind=1, lind=2)
        plots.update([epoch, avg_loss_g], rind=1, cind=2, lind=3)
        plots.update([epoch, avg_loss_d], rind=1, cind=2, lind=4)
        plots.save()
Example #4
 def validate(epoch):
     total_loss = 0
     total_psnr = 0
     for batch in validation_data_loader:
         target = Variable(batch[0])
         input = Variable(batch[1])
         if cuda:
             target = target.cuda()
             input = input.cuda()
         mse = criterion(net(input), target)
         total_loss += mse.data[0]
         total_psnr += psnr(mse.data[0])
     avg_loss = total_loss / len(validation_data_loader)
     avg_psnr = total_psnr / len(validation_data_loader)
     print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr))
     plots.update([epoch, avg_psnr], rind=1, cind=1, lind=2)
     plots.update([epoch, avg_loss], rind=1, cind=2, lind=2)
     plots.save()
Example #5
def run():
    image_seq = []
    mat_data = sio.loadmat('./data/toy31_cassi.mat')
    im_orig = mat_data['orig'] / 255.
    im_orig = torch.from_numpy(im_orig).type(torch.float32).to(device)
    image_m, image_n, image_c = im_orig.shape
    mask = torch.from_numpy(mat_data['mask'].astype(np.float32)).to(device)
    shape = im_orig.shape
    y = mat_data['meas'] / 255.
    y = torch.from_numpy(y).type(torch.float32).to(device)
    # data missing and noise
    # y = y + level * torch.randn_like(y)
    # index_rand = np.random.rand(*list(y.shape))
    # index_y = np.argwhere(index_rand < 0.05)
    # y[index_y[:,0], index_y[:,1]] = 0
    x = y.unsqueeze(2).expand_as(mask) * mask
    mask_sum = torch.sum(mask**2, dim=2)
    mask_sum[mask_sum == 0] = 1
    flag = True
    y1 = torch.zeros_like(y, dtype=torch.float32, device=device)
    sigma = 5. / 255
    for i in tqdm(range(100)):
        if i == 20: flag = False
        if i < 20: sigma = 50. / 255
        elif i < 30: sigma = 25. / 255
        elif i < 40: sigma = 12. / 255
        else: sigma = 6. / 255
        yb = torch.sum(mask * x, dim=2)
        # no Acceleration
        # temp = (y - yb) / mask_sum
        # x = x + 1 * (temp.unsqueeze(2).expand_as(mask) * mask)
        y1 = y1 + (y - yb)
        temp = (y1 - yb) / mask_sum
        x = x + 1 * (temp.unsqueeze(2).expand_as(mask) * mask)

        x = ffdnet_denosing(x, sigma, flag).clamp(0, 1)
        image_seq.append(x[:, :, 0].clamp(0., 1.).cpu().numpy())

    # save_ani(image_res, filename='ffd_HSI.mp4', fps=10)
    x.clamp_(0., 1.)
    psnr_ = [psnr(x[..., kv], im_orig[..., kv]) for kv in range(image_c)]
    ssim_ = [ssim(x[..., kv], im_orig[..., kv]) for kv in range(image_c)]
    return np.mean(psnr_), np.mean(ssim_)
Example #6
def ApertureWisePSNR(Groundtruth, Reconstruction):
    """
    Calculate the PSNR value for each sub-aperture image of the
    input reconstructed light field.

    :param Groundtruth:    input groundtruth light field
    :param Reconstruction: input reconstructed light field

    :return:               aperture-wise PSNR values
    """
    h, w, s, t = Groundtruth.shape[:4]
    PSNRs = np.zeros([s, t])
    for i in range(s):
        for j in range(t):
            gtimg = Groundtruth[:, :, i, j, ...]
            gtimg = np.squeeze(gtimg)
            recons = Reconstruction[:, :, i, j, ...]
            recons = np.squeeze(recons)
            PSNRs[i, j] = psnr(gtimg, recons)
    return PSNRs
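A quick usage sketch for ApertureWisePSNR (the array shapes and values below are illustrative assumptions; the function relies on the repository's own two-argument psnr helper being importable):

import numpy as np

# Hypothetical smoke test: a 64x64 light field sampled on a 5x5 angular grid.
gt = np.random.rand(64, 64, 5, 5).astype(np.float32)
rec = np.clip(gt + 0.01 * np.random.randn(64, 64, 5, 5).astype(np.float32), 0.0, 1.0)

psnr_map = ApertureWisePSNR(gt, rec)
print(psnr_map.shape)   # (5, 5): one PSNR value per sub-aperture view
print(psnr_map.mean())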
Example #7
 def train_epoch(epoch):
     epoch_loss = 0
     total_psnr = 0
     for i, batch in enumerate(training_data_loader, 1):
         target = Variable(batch[0])
         input = Variable(batch[1])
         if cuda:
             target = target.cuda()
             input = input.cuda()
         optimizer.zero_grad()
         loss = criterion(net(input), target)
         epoch_loss += loss.data[0]
         total_psnr += psnr(loss.data[0])
         loss.backward()
         optimizer.step()
         print("Epoch[{}]({}/{}): Loss: {:.4f}".format(
             epoch, i, len(training_data_loader), loss.data[0]))
     avg_loss = epoch_loss / len(training_data_loader)
     avg_psnr = total_psnr / len(training_data_loader)
     print("Epoch {} : Avg Loss: {:.4f}".format(epoch, avg_loss))
     plots.update([epoch, avg_psnr], rind=1, cind=1, lind=1)
     plots.update([epoch, avg_loss], rind=1, cind=2, lind=1)
     plots.save()
Example #8
def pnp_fbs_csmri(denoise_func, im_orig, mask, noises, **opts):

    alpha = opts.get('alpha', 0.4)
    maxitr = opts.get('maxitr', 100)
    verbose = opts.get('verbose', 1)
    sigma = opts.get('sigma', 5)
    """ Initialization. """

    m, n = im_orig.shape
    index = np.nonzero(mask)

    y = np.fft.fft2(im_orig) * mask + noises  # observed value
    x_init = np.fft.ifft2(y)  # zero fill

    print(psnr(x_init, im_orig))

    x = np.copy(x_init)
    """ Main loop. """
    for i in range(maxitr):

        xold = np.copy(x)
        """ Update variables. """

        res = np.fft.fft2(x) * mask
        index = np.nonzero(mask)
        res[index] = res[index] - y[index]
        x = x - alpha * np.fft.ifft2(res)
        # x = np.real( x )
        x = np.absolute(x)
        """ Monitoring. """

        # psnr
        if verbose:
            print("i: {}, \t psnr: {}"\
                  .format(i+1, psnr(x,im_orig)))

        xout = np.copy(x)
        """ Denoising step. """

        xtilde = np.copy(x)
        mintmp = np.min(xtilde)
        maxtmp = np.max(xtilde)
        xtilde = (xtilde - mintmp) / (maxtmp - mintmp)

        # the reason for the following scaling:
        # our denoisers are trained with "normalized images + noise"
        # so the scale should be 1 + O(sigma)
        scale_range = 1.0 + sigma / 255.0 / 2.0
        scale_shift = (1 - scale_range) / 2.0
        xtilde = xtilde * scale_range + scale_shift

        # pytorch denoising model
        xtilde_torch = np.reshape(xtilde, (1, 1, m, n))
        xtilde_torch = torch.from_numpy(xtilde_torch).type(
            torch.FloatTensor).cuda()
        r = denoise_func(xtilde_torch).cpu().numpy()
        r = np.reshape(r, (m, n))
        x = xtilde - r

        # scale and shift the denoised image back
        x = (x - scale_shift) / scale_range
        x = x * (maxtmp - mintmp) + mintmp

    return xout
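A possible driver for pnp_fbs_csmri, mirroring the ADMM driver shown later in Example #14 (load_model and psnr are assumed to come from the same repository; the model type and sigma passed to load_model here are illustrative, not the original script's values):

import numpy as np
import scipy.io as sio
import cv2
import torch

with torch.no_grad():
    # Ground truth, sampling mask and noise loaded as in Example #14.
    im_orig = cv2.imread('Demo_mat/CS_MRI/Brain.jpg', 0) / 255.0
    mask = sio.loadmat('Demo_mat/CS_MRI/Q_Random30.mat').get('Q1').astype(np.float64)
    noises = sio.loadmat('Demo_mat/CS_MRI/noises.mat').get('noises').astype(np.complex128) * 3.0

    denoiser = load_model('DnCNN', 5)   # hypothetical arguments
    out = pnp_fbs_csmri(denoiser, im_orig, mask, noises,
                        alpha=0.4, maxitr=100, sigma=5, verbose=1)
    print('Plug-and-Play FBS PSNR: ', psnr(out, im_orig))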
Example #9
    def train_epoch(epoch):
        epoch_loss_g = 0
        epoch_loss_d = 0
        total_psnr = 0
        for i, batch in enumerate(training_data_loader, 1):
            real_batch_size = batch[0].size(0)
            label.data.resize_(real_batch_size)

            target = Variable(batch[0])
            input = Variable(batch[1])
            if cuda:
                target = target.cuda()
                input = input.cuda()

            # Train Generator
            optimizerG.zero_grad()
            reconstructed = gennet(input)

            feature_r = vgg(reconstructed)
            feature_t = vgg(target)

            content_loss = content_criterion(feature_r, feature_t.detach())

            # non-saturating generator loss: use the real label so BCE gives
            # -log(D(G(x))) instead of log(1 - D(G(x)))
            label.data.fill_(real_label)
            pred = disnet(reconstructed)
            adversarial_loss = adversarial_criterion(pred, label)

            loss = content_loss + 1e-3 * adversarial_loss
            loss.backward()
            optimizerG.step()

            # Train Discriminator
            optimizerD.zero_grad()
            reconstructed = gennet(input)

            label.data.fill_(fake_label)
            fake_loss = adversarial_criterion(disnet(reconstructed.detach()),
                                              label)
            fake_loss.backward()

            label.data.fill_(real_label)
            real_loss = adversarial_criterion(disnet(target), label)
            real_loss.backward()
            optimizerD.step()

            epoch_loss_g += loss.data[0]
            epoch_loss_d += fake_loss.data[0] + real_loss.data[0]
            total_psnr += psnr(
                content_criterion(reconstructed, target).data[0])

            print("Epoch[{}]({}/{}): Loss(G): {:.4f} Loss(D): {:.4f}".format(
                epoch, i, len(training_data_loader), loss.data[0],
                fake_loss.data[0] + real_loss.data[0]))
        avg_loss_g = epoch_loss_g / len(training_data_loader)
        avg_loss_d = epoch_loss_d / len(training_data_loader)
        avg_psnr = total_psnr / len(training_data_loader)

        print("Epoch {} : Avg Loss(G): {:.4f} Avg Loss(D): {:.4f}".format(
            epoch, avg_loss_g, avg_loss_d))
        plots.update([epoch, avg_psnr], rind=1, cind=1, lind=1)
        plots.update([epoch, avg_loss_g], rind=1, cind=2, lind=1)
        plots.update([epoch, avg_loss_d], rind=1, cind=2, lind=2)
        plots.save()
Example #10
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)

        with torch.no_grad():
            preds = model(inputs).clamp(0.0, 1.0)
        '''
        Adding image grids slows down the execution. Uncomment this block if you wish to log output images in TensorBoard.
        '''
        # grid_inputs = torchvision.utils.make_grid(inputs)
        # writer.add_image('Input LR',grid_inputs)
        # grid_outputs = torchvision.utils.make_grid(preds)
        # writer.add_image('Output HR',grid_outputs)
        # grid_gt = torchvision.utils.make_grid(labels)
        # writer.add_image('Ground Truth HR',grid_gt)

        psnr1 = psnr(inputs.squeeze(0), preds.squeeze(0))
        val_psnr_list += psnr1
        writer.add_scalar("val_psnr", psnr1)

    print('validation psnr: {} for epoch: {}'.format(
        val_psnr_list / len(val_dataset), epoch))
    torch.save(
        {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'val_psnr': psnr1,
        }, os.path.join('saved_weights/weighted',
                        'epoch_{}.pth'.format(epoch)))
Example #11
            outputs = model(inputs)
            loss = criterion(outputs, labels).to(device)

            loss.backward()
            optimizer.step()
            writer.add_scalar('loss', loss.item())

            running_loss += loss.item() * inputs.size(0)

            print('Epoch {}, Iteration: {}, Loss: {}'.format(
                epoch, iteration, loss.item()))
            '''
            Calculating PSNR and SSIM at every iteration is expensive, so they are computed only periodically here.
            '''
            if (iteration % 100 == 0):
                psnr_score = psnr(inputs, outputs)
                writer.add_scalar('psnr_train', psnr_score)
                ssim_score = utils.pytorch_ssim.ssim(
                    inputs, outputs)  #calculated in batches of batch_size
                writer.add_scalar('ssim_train', ssim_score.item())

            if (phase == 'validation'):
                if (iteration % 50 == 0):
                    psnr_score = psnr(inputs, outputs)
                    writer.add_scalar('psnr_validation', psnr_score)
                    ssim_score = utils.pytorch_ssim.ssim(
                        inputs, outputs)  #calculated in batches of batch_size
                    writer.add_scalar('ssim_validation', ssim_score.item())
                if (iteration % 200 == 0):
                    grid_inputs = torchvision.utils.make_grid(inputs)
                    writer.add_image('Input LR', grid_inputs)
Example #12
def train(patch_width=PATCH_WIDTH,
          patch_height=PATCH_HEIGHT,
          epoch_num=EPOCH_NUM,
          batch_size=BATCH_SIZE,
          learning_rate=LEARNING_RATE,
          learning_rate_decay_steps=LEARNING_RATE_DECAY_STEPS,
          learning_rate_decay_rate=LEARNING_RATE_DECAY_RATE,
          dsp_itv=DSP_ITV,
          ckpt_path=ckpt_path,
          save_best=save_best,
          early_stop=early_stop):
    '''
    Define the TensorFlow graph and run training and validation.
    '''
    #------------------------------define the graph---------------------------
    Y = tf.placeholder(tf.float32, [None, None, None, 1], name='Y')
    S0 = tf.placeholder(tf.float32, [None, None, None, 1], name='S0')
    DoLP = tf.placeholder(tf.float32, [None, None, None, 1], name='DoLP')
    AoP = tf.placeholder(tf.float32, [None, None, None, 1], name='AoP')
    #    Para = tf.placeholder(tf.float32, [None, None, None, 3])#define tensors of input and label

    # DoLP_hat= ForkNet(Y)
    S0_hat, DoLP_hat, AoP_hat = ForkNet(Y, padding='SAME')
    loss = LOSS(S0_hat, S0, DoLP_hat, DoLP, AoP_hat, AoP)
    tf.add_to_collection('S0_hat', S0_hat)
    tf.add_to_collection('DoLP_hat', DoLP_hat)
    tf.add_to_collection('AoP_hat', AoP_hat)

    global_step = tf.Variable(0, name='global_step', trainable=False)
    decayed_learning_rate = tf.train.exponential_decay(
        learning_rate,
        global_step,
        learning_rate_decay_steps,
        learning_rate_decay_rate,
        staircase=True)  # learning rate decay

    train_step = tf.train.AdamOptimizer(decayed_learning_rate).minimize(
        loss, global_step=global_step)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()  # instantiate the saver

    train_steps, train_Y, train_label, val_steps, val_Y, val_label = load_data(
    )
    # val_s0 = val_para[:, :, :, :1]
    # val_dolp = val_para[:, :, :, 1:2]
    # val_aop = val_para[:, :, :, 2:]
    #    print(np.max(val_Y))

    with tf.Session() as sess:
        print('\nStart Training!')
        sess.run(init)
        min_loss = np.inf
        wait = 0
        psnr_record = []
        total_S0_PSNR_BIC = 0
        total_DoLP_PSNR_BIC = 0
        total_AoP_PSNR_BIC = 0

        for epoch in range(epoch_num):
            #training set batch generator
            train_generator = patch_batch_generator(train_Y,
                                                    train_label,
                                                    batch_size,
                                                    patch_width,
                                                    patch_height,
                                                    random_shuffle=True)

            #test set batch generator
            val_generator = patch_batch_generator(val_Y,
                                                  val_label,
                                                  batch_size,
                                                  patch_width,
                                                  patch_height,
                                                  random_shuffle=False,
                                                  augment=False)

            print(
                '=======================================Epoch:{}/{}======================================='
                .format(epoch, epoch_num))
            # training
            total_train_loss = 0
            for step in range(train_steps):
                (Input_batch_train, Para_batch_train) = next(train_generator)

                Y_batch_train = Input_batch_train[:, :, :, :1]
                S0_batch_train = Para_batch_train[:, :, :, :1]
                DoLP_batch_train = Para_batch_train[:, :, :, 1:2]
                AoP_batch_train = Para_batch_train[:, :, :, 2:]

                sess.run(train_step,
                         feed_dict={
                             Y: Y_batch_train,
                             S0: S0_batch_train,
                             DoLP: DoLP_batch_train,
                             AoP: AoP_batch_train
                         })
                train_loss = sess.run(loss,
                                      feed_dict={
                                          Y: Y_batch_train,
                                          S0: S0_batch_train,
                                          DoLP: DoLP_batch_train,
                                          AoP: AoP_batch_train
                                      })
                total_train_loss += train_loss

                if step % dsp_itv == 0:
                    rate = float(step + 1) / float(train_steps)
                    rate_num = int(rate * 100)
                    arrow = 0 if step + 1 == train_steps else 1
                    r = '\rStep:%d/%d [%s%s%s]%d%% --- Training loss:%f' % \
                        (step + 1, train_steps, '■' * rate_num, '▶' * arrow, '-' * (100 - rate_num - arrow), rate * 100, train_loss)
                    print(r)

            # validation

            total_val_loss = 0
            total_S0_PSNR = 0
            total_DoLP_PSNR = 0
            total_AoP_PSNR = 0

            for step in range(val_steps):
                (Input_batch_val, Para_batch_val) = next(val_generator)

                Y_batch_val = Input_batch_val[:, :, :, :1]
                BIC_batch_val = Input_batch_val[:, :, :, 1:]
                S0_batch_val = Para_batch_val[:, :, :, :1]
                DoLP_batch_val = Para_batch_val[:, :, :, 1:2]
                AoP_batch_val = Para_batch_val[:, :, :, 2:]

                total_val_loss += sess.run(loss,
                                           feed_dict={
                                               Y: Y_batch_val,
                                               S0: S0_batch_val,
                                               DoLP: DoLP_batch_val,
                                               AoP: AoP_batch_val
                                           })
                S0_hat_val, DoLP_hat_val, AoP_hat_val = sess.run(
                    [S0_hat, DoLP_hat, AoP_hat], feed_dict={Y: Y_batch_val})
                #limit the value
                S0_hat_val = np.clip(S0_hat_val, 0, 2)
                DoLP_hat_val = np.clip(DoLP_hat_val, 0, 1)
                # AoP_hat_val = np.clip(AoP_hat_val, 0, math.pi)
                # DoLP_hat_val = Normalize(DoLP_hat_val, 0, 1)
                total_S0_PSNR += psnr(S0_batch_val[:, :, :, 0],
                                      S0_hat_val[:, :, :, 0], 2)
                total_DoLP_PSNR += psnr(DoLP_batch_val[:, :, :, 0],
                                        DoLP_hat_val[:, :, :, 0], 1)
                total_AoP_PSNR += psnr(AoP_batch_val[:, :, :, 0],
                                       AoP_hat_val[:, :, :, 0], math.pi / 2.)
                # for b in range(AoP_batch_val.shape[0]):
                #     total_AoP_PSNR += compare_ssim(np.float32(AoP_batch_val[b, :, :, 0]), np.float32(AoP_hat_val[b, :, :, 0]), data_range=math.pi / 2.)

                if epoch == 0:
                    #                print('max:', max(val_bic[0,6:-6,6:-6,0]))
                    S0_BIC = (BIC_batch_val[:, :, :, 0] +
                              BIC_batch_val[:, :, :, 1] +
                              BIC_batch_val[:, :, :, 2] +
                              BIC_batch_val[:, :, :, 3]) / 2.
                    DoLP_BIC = dolp(BIC_batch_val[:, :, :, 0],
                                    BIC_batch_val[:, :, :, 1],
                                    BIC_batch_val[:, :, :,
                                                  2], BIC_batch_val[:, :, :,
                                                                    3])
                    AoP_BIC = aop(
                        BIC_batch_val[:, :, :, 0], BIC_batch_val[:, :, :, 1],
                        BIC_batch_val[:, :, :, 2], BIC_batch_val[:, :, :, 3]
                    ) + math.pi / 4.  # avoid negative values
                    total_S0_PSNR_BIC += psnr(S0_batch_val[:, :, :, 0], S0_BIC,
                                              2)
                    total_DoLP_PSNR_BIC += psnr(DoLP_batch_val[:, :, :, 0],
                                                DoLP_BIC, 1)
                    total_AoP_PSNR_BIC += psnr(AoP_batch_val[:, :, :, 0],
                                               AoP_BIC, math.pi / 2.)

                    # for b in range(AoP_batch_val.shape[0]):
                    #     total_AoP_PSNR_BIC += compare_ssim(np.float32(AoP_batch_val[b, :, :, 0]), np.float32(AoP_BIC[b]), data_range=math.pi / 2.)

            print(
                '========================================Validation======================================='
                + '\nTraining loss: %.5f' % (total_train_loss / train_steps) +
                '\nValidation loss: %.5f' % (total_val_loss / val_steps) +
                '\n ————————————————————————————————————————————————————————————————————————————————'
                +
                #                  '\n| PSNR of I_0 using PDCNN: %.5f    |   PSNR of I_0 using BICUBIC: %.5f   |' % (total_X_0_PSNR/val_steps, 32.872) +
                #                  '\n| PSNR of I_45 using PDCNN: %.5f   |   PSNR of I_45 using BICUBIC: %.5f  |' % (total_X_45_PSNR/val_steps, 32.973) +
                #                  '\n| PSNR of I_90 using PDCNN: %.5f   |   PSNR of I_90 using BICUBIC: %.5f  |' % (total_X_90_PSNR/val_steps, 33.008) +
                #                  '\n| PSNR of I_135 using PDCNN: %.5f  |   PSNR of I_135 using BICUBIC: %.5f |' % (total_X_135_PSNR/val_steps, 32.923) +
                '\n| PSNR of S_0 using SRCNN: %.5f    |   PSNR of S_0 using BICUBIC: %.5f   |'
                % (total_S0_PSNR / val_steps, total_S0_PSNR_BIC / val_steps) +
                '\n| PSNR of DoLP using SRCNN: %.5f   |   PSNR of DoLP using BICUBIC: %.5f  |'
                % (total_DoLP_PSNR / val_steps,
                   total_DoLP_PSNR_BIC / val_steps) +
                '\n| PSNR of AoP using SRCNN: %.5f    |   PSNR of AoP using BICUBIC: %.5f   |'
                %
                (total_AoP_PSNR / val_steps, total_AoP_PSNR_BIC / val_steps) +
                '\n ————————————————————————————————————————————————————————————————————————————————'
            )

            psnr_record.append([
                total_S0_PSNR / val_steps, total_DoLP_PSNR / val_steps,
                total_AoP_PSNR / val_steps
            ])

            if save_best or early_stop:
                if metrics == 'validation loss':
                    current_loss = total_val_loss / val_steps
                elif metrics == 'training loss':
                    current_loss = total_train_loss / train_steps
                if current_loss < min_loss:
                    print('Validation loss decreased from %.5f to %.5f' %
                          (min_loss, current_loss))
                    min_loss = current_loss
                    if save_best:
                        saver.save(sess, ckpt_path)
                        print("Model saved in file: %s" % ckpt_path)
                    if early_stop:
                        wait = 0
                else:
                    print('Validation loss did not decrease.')
                    if early_stop:
                        wait += 1
                        if wait > patient:
                            print('Early stop!')
                            break
        if not save_best:
            saver.save(sess, ckpt_path)
            print("Model saved in file: %s" % ckpt_path)

        with open(csv_path, 'w') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerows(psnr_record)
Example #13
def pnp_admm_photon_imaging(b, denoiser, im_true, **opts):
    """
    Parameters:
        :b - the observation in single photon imaging.
        :denoiser - the Gaussian denoiser used in Plug-and-Play ADMM.
        :im_true - the clean image used to monitor PSNR.
        :opts - the kwargs for hyperparameters in Plug-and-Play ADMM.
            :K - the parameter in single photon imaging.
            :lam - the value of 1/alpha.
            :rho - TODO
            :maxitr - the max number of iterations.
            :verbose - a flag that enables/disables info printing.
                - NOTE: if `peak` and `M` options exist in `opts`, then the
                  `clean` image is the scaled version of the original image.
            :beta - the prior weight parameter.
            :step - TODO
    """
    """ Process parameters. """

    K = opts.get('K', 8)
    lam = opts.get('lam', 15.0)
    rho = opts.get('rho', 100.0)  # rho = 1.0 / alpha
    maxitr = opts.get('maxitr', 50)
    data_range = opts.get('data_range', 1.0)
    verbose = opts.get('verbose', 1)
    step = opts.get('step', 1.0)
    beta = opts.get('beta', 1.0)
    """ Initialization. """

    K1 = blockfunc(b, (K, K), np.sum)
    x = K1 / K**2

    u = np.zeros_like(x, dtype=np.float64)
    v = np.copy(x)

    m, n = x.shape

    sigma = np.sqrt(lam / rho)
    """ Main loop. """
    for i in range(maxitr):

        x_old = x
        u_old = u
        v_old = v
        """ Inverse step. """

        x = inverse_step(u, v, K1, K, rho)
        """ Denoising step. """

        vtilde = x + u

        # scale vtilde to be in range of [0,1]
        mintmp = 0.0
        maxtmp = np.max(vtilde)
        vtilde = (vtilde - mintmp) / (maxtmp - mintmp)
        trans_sigma = sigma / (maxtmp - mintmp)

        # then set data range to [0.15, 0.85] to avoid clipping of extreme values
        scale_range = 0.4
        scale_shift = (1 - scale_range) / 2.0
        vtilde = vtilde * scale_range + scale_shift
        trans_sigma = trans_sigma * scale_range

        # pytorch denoising model
        vtilde_torch = np.reshape(vtilde, (1, 1, m, n))
        vtilde_torch = torch.from_numpy(vtilde_torch).type(
            torch.FloatTensor).cuda()
        r = denoiser(vtilde_torch).cpu().numpy()
        r = np.reshape(r, (m, n))
        v = vtilde - r

        # scale and shift the denoised image back
        v = (v - scale_shift) / scale_range
        v = v * (maxtmp - mintmp) + mintmp
        """ Update variables. """
        u = u + x - v
        """ Monitoring. """
        # successive difference
        dif = np.sqrt(np.sum(np.square(x - x_old)))
        dif_denom = np.sqrt(np.sum(np.square(x_old)))

        # psnr

        if verbose:
            print("i: {}, \t successive difference: {}, \t psnr: {} \t"\
                  .format(i+1, dif/dif_denom, psnr(im_true, x)))

    return x
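A minimal usage sketch for pnp_admm_photon_imaging. Everything below is an assumption for illustration: the binary observation b is a crude Bernoulli stand-in, the placeholder denoiser should be replaced by a pretrained PyTorch Gaussian denoiser as in Examples #14 and #16, and the repository's blockfunc, inverse_step and psnr helpers are assumed to be importable.

import numpy as np
import torch

K = 8
im_true = np.random.rand(64, 64)   # stand-in clean image in [0, 1]
# Simulated binary single-photon observation with K*K measurements per pixel.
b = (np.random.rand(64 * K, 64 * K) < np.kron(im_true, np.ones((K, K)))).astype(np.float64)

denoiser = lambda t: torch.zeros_like(t)   # placeholder: predicts zero noise (no actual denoising)

with torch.no_grad():
    out = pnp_admm_photon_imaging(b, denoiser, im_true,
                                  K=K, lam=15.0, rho=100.0,
                                  maxitr=50, verbose=1)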
Example #14
    15, 2.0, 100
)  # the arguments are default sigma, default alpha and default max iteration.

# ---- load the model ----
model = load_model(opt.model_type, opt.sigma)

with torch.no_grad():

    # ---- load the ground truth ----
    im_orig = cv2.imread('Demo_mat/CS_MRI/Brain.jpg', 0) / 255.0

    # ---- load mask matrix ----
    mat = sio.loadmat('Demo_mat/CS_MRI/Q_Random30.mat')
    mask = mat.get('Q1').astype(np.float64)

    # ---- load noises -----
    noises = sio.loadmat('Demo_mat/CS_MRI/noises.mat')
    noises = noises.get('noises').astype(np.complex128) * 3.0

    # ---- set options -----
    opts = dict(sigma=opt.sigma,
                alpha=opt.alpha,
                maxitr=opt.maxitr,
                verbose=opt.verbose)

    # ---- plug and play !!! -----
    out = pnp_admm_csmri(model, im_orig, mask, noises, **opts)

    # ---- print result -----
    print('Plug-and-Play PSNR: ', psnr(out, im_orig))
Example #15
        DoLP_true = dolp(origin_img[i, :, :, 0], origin_img[i, :, :, 1], origin_img[i, :, :, 2], origin_img[i, :, :, 3])
        AoP_true = aop(origin_img[i, :, :, 0], origin_img[i, :, :, 1], origin_img[i, :, :, 2], origin_img[i, :, :, 3]) + math.pi/4.
        origin_s0[i] = S0_true
        origin_dolp[i] = DoLP_true
        origin_aop[i] = AoP_true

        #the dolp of bic images
        S0_BIC = 1 / 2 * (bic_img[i, :, :, 0] + bic_img[i, :, :, 1] + bic_img[i, :, :, 2] + bic_img[i,:, :, 3])
        DoLP_BIC = dolp(bic_img[i, :, :, 0], bic_img[i, :, :, 1], bic_img[i, :, :, 2], bic_img[i, :, :, 3])
        AoP_BIC = aop(bic_img[i, :, :, 0], bic_img[i, :, :, 1], bic_img[i, :, :, 2], bic_img[i, :, :, 3]) + math.pi / 4.
        bic_s0[i] = S0_BIC
        bic_dolp[i] = DoLP_BIC
        bic_aop[i] = AoP_BIC

        # Calculate the PSNR of S0, DoLP and AoP obtained through PDCNN method
        total_S0_PSNR[i]=  psnr(S0_true, S0_hat_test, 2)
        total_DoLP_PSNR[i] = psnr(DoLP_true, DoLP_hat_test, 1)
        total_AoP_PSNR[i] = psnr(AoP_true, AoP_hat_test, math.pi/2.)

        # Calculate the PSNR of S0, DoLP and AoP obtained through BICUBIC method
        total_S0_PSNR_BIC[i] = psnr(S0_true, S0_BIC, 2)
        total_DoLP_PSNR_BIC[i] = psnr(DoLP_true, DoLP_BIC, 1)
        total_AoP_PSNR_BIC[i] = psnr(AoP_true, AoP_BIC, math.pi/2.)

        # show the progress bar
        view_bar(i, IMG_NUM)

    print('\n========================================Testing=======================================' +
          '\n ————————————————————————————————————————————————————————————————————————————————' +
          '\n| PSNR of S_0 using SRCNN: %.5f    |   PSNR of S_0 using BICUBIC: %.5f   |' % (np.mean(total_S0_PSNR), np.mean(total_S0_PSNR_BIC)) +
          '\n| PSNR of DoLP using PDCNN: %.5f   |   PSNR of DoLP using BICUBIC: %.5f  |' % (np.mean(total_DoLP_PSNR), np.mean(total_DoLP_PSNR_BIC)) +
Example #16
def pnp_admm_csmri(model, im_orig, mask, noises, **opts):

    alpha = opts.get('alpha', 2.0)
    maxitr = opts.get('maxitr', 100)
    verbose = opts.get('verbose', 1)
    sigma = opts.get('sigma', 5)
    """ Initialization. """

    m, n = im_orig.shape
    index = np.nonzero(mask)

    y = np.fft.fft2(im_orig) * mask + noises  # observed value
    x_init = np.fft.ifft2(y)  # zero fill

    print('zero-fill PSNR:', psnr(x_init, im_orig))

    x = np.absolute(np.copy(x_init))
    v = np.copy(x)
    u = np.zeros((m, n), dtype=np.float64)
    """ Main loop. """
    for i in range(maxitr):

        xold = np.copy(x)
        vold = np.copy(v)
        uold = np.copy(u)
        """ Update variables. """

        vtilde = np.copy(x + u)
        vf = np.fft.fft2(vtilde)
        La2 = 1.0 / 2.0 / alpha
        vf[index] = (La2 * vf[index] + y[index]) / (1.0 + La2)
        v = np.real(np.fft.ifft2(vf))
        """ Denoising step. """

        xtilde = np.copy(2 * v - xold - uold)
        mintmp = np.min(xtilde)
        maxtmp = np.max(xtilde)
        xtilde = (xtilde - mintmp) / (maxtmp - mintmp)

        # the reason for the following scaling:
        # our denoisers are trained with "normalized images + noise"
        # so the scale should be 1 + O(sigma)
        scale_range = 1.0 + sigma / 255.0 / 2.0
        scale_shift = (1 - scale_range) / 2.0
        xtilde = xtilde * scale_range + scale_shift

        # pytorch denoising model
        xtilde_torch = np.reshape(xtilde, (1, 1, m, n))
        xtilde_torch = torch.from_numpy(xtilde_torch).type(
            torch.FloatTensor).cuda()
        r = model(xtilde_torch).cpu().numpy()
        r = np.reshape(r, (m, n))
        x = xtilde - r

        # scale and shift the denoised image back
        x = (x - scale_shift) / scale_range
        x = x * (maxtmp - mintmp) + mintmp
        """ Update variables. """
        u = uold + xold - v
        """ Monitoring. """
        if verbose:
            print("i: {}, \t psnr: {}"\
                  .format(i+1, psnr(x,im_orig)))

    return x
Example #17
def run():
    mat_data = sio.loadmat('./data/toy31_cassi.mat')
    im_orig = mat_data['orig'] / 255
    im_orig = torch.from_numpy(im_orig).type(torch.float32).to(device)
    image_m, image_n, image_c = im_orig.shape
    # image_seq = []
    # ---- load mask matrix ----
    mask = torch.from_numpy(mat_data['mask'].astype(np.float32)).to(device)
    y = mat_data['meas'] / 255
    y = torch.from_numpy(y).type(torch.float32).to(device)
    # data missing and noise
    # y = y + level * torch.randn_like(y)
    # index_rand = np.random.rand(*list(y.shape))
    # index_y = np.argwhere(index_rand < 0.05)
    # y[index_y[:,0], index_y[:,1]] = 0
    x = y.unsqueeze(2).expand_as(mask) * mask
    mask_sum = torch.sum(mask**2, dim=2)
    mask_sum[mask_sum == 0] = 1
    flag = True
    y1 = torch.zeros_like(y, dtype=torch.float32, device=device)
    sigma_ = 50 / 255
    for i in tqdm(range(100)):
        if i == 20: flag = False
        yb = torch.sum(mask * x, dim=2)
        # no Acceleration
        # temp = (y - yb) / (mask_sum)
        # x = x + 1 * (temp.unsqueeze(2).expand_as(mask) * mask)
        y1 = y1 + (y - yb)
        temp = (y1 - yb) / mask_sum
        x = x + 1 * (temp.unsqueeze(2).expand_as(mask) * mask)

        if i < 20:
            x = ffdnet_denosing(x, 50. / 255, flag)
        else:
            ffdnet_hypara_list = [100., 80., 60., 40., 20., 10., 5.]
            ffdnet_num = len(ffdnet_hypara_list)
            tv_hypara_list = [10, 0.01]
            tv_num = len(tv_hypara_list)
            ffdnet_list = [
                ffdnet_denosing(x, level / 255., flag).clamp(0, 1)
                for level in ffdnet_hypara_list
            ]
            tv_list = [
                TV_denoising(x, level, 5).clamp(0, 1)
                for level in tv_hypara_list
            ]

            ffdnet_mat = np.stack([
                x_ele[:, :, :].cpu().numpy().reshape(-1).astype(np.float64)
                for x_ele in ffdnet_list
            ],
                                  axis=0)
            tv_mat = np.stack([
                x_ele[:, :, :].cpu().numpy().reshape(-1).astype(np.float64)
                for x_ele in tv_list
            ],
                              axis=0)
            w = cp.Variable(ffdnet_num + tv_num)
            P = np.zeros((ffdnet_num + tv_num, ffdnet_num + tv_num))
            P[:ffdnet_num, :ffdnet_num] = ffdnet_mat @ ffdnet_mat.T
            P[:ffdnet_num, ffdnet_num:] = -ffdnet_mat @ tv_mat.T
            P[ffdnet_num:, :ffdnet_num] = -tv_mat @ ffdnet_mat.T
            P[ffdnet_num:, ffdnet_num:] = tv_mat @ tv_mat.T
            one_vector_ffdnet = np.ones((1, ffdnet_num))
            one_vector_tv = np.ones((1, tv_num))
            objective = cp.quad_form(w, P)
            problem = cp.Problem(cp.Minimize(objective), [
                one_vector_ffdnet @ w[:ffdnet_num] == 1,
                one_vector_tv @ w[ffdnet_num:] == 1, w >= 0
            ])
            problem.solve()
            w_value = w.value
            x_ffdnet, x_tv = 0, 0
            for idx in range(ffdnet_num):
                x_ffdnet += w_value[idx] * ffdnet_list[idx]
            for idx in range(tv_num):
                x_tv += w_value[idx + ffdnet_num] * tv_list[idx]
            x = 0.5 * (x_ffdnet + x_tv)
            # image_seq.append(x[...,0])

    x.clamp_(0, 1)
    # fps = 10
    # save_ani(image_seq, filename='HSI.mp4', fps=fps)
    psnr_ = [psnr(x[..., kv], im_orig[..., kv]) for kv in range(image_c)]
    ssim_ = [ssim(x[..., kv], im_orig[..., kv]) for kv in range(image_c)]
    return np.mean(psnr_), np.mean(ssim_)
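A note on the weighted-fusion step above: with the flattened FFDNet estimates stacked as the rows of ffdnet_mat and the TV estimates as the rows of tv_mat, the quadratic form w^T P w equals || ffdnet_mat.T @ w[:ffdnet_num] - tv_mat.T @ w[ffdnet_num:] ||^2. The cvxpy problem therefore searches for the convex combination of FFDNet candidates and the convex combination of TV candidates (each weight group sums to 1, w >= 0) that agree most closely, and x is set to the average of the two fused estimates.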