Example #1
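# Variant 1: multi-scale test loop. It builds two successively downsampled
# copies of the input and label, runs the multi-scale model, then records
# PSNR/SSIM both for the raw input vs. label and for the model output vs.
# label, saves each output image, and appends the averages to test_result.txt.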
def test():
    model.eval()
    #input and label
    avg_psnr1 = 0
    avg_ssim1 = 0
    #output and label
    avg_psnr2 = 0
    avg_ssim2 = 0

    for i, sample in enumerate(dataloader):
        input_image, label_image, name = sample['input_image'], sample[
            'label_image'], sample['name'][0]  #tuple to str

        #Wrap with torch Variable
        input_image, label_image = wrap_variable(input_image, label_image,
                                                 use_gpu, True)
        inputs_1 = downsampling(input_image)
        label_1 = downsampling(label_image)
        inputs_2 = downsampling(inputs_1)
        label_2 = downsampling(label_1)

        output_image, outputs_1, outputs_2 = model(input_image, inputs_1,
                                                   inputs_2)

        # clamp the output to [0, 1]
        output_image = output_image.clamp(0.0, 1.0)

        #calculate psnr
        psnr1 = myutils.psnr(input_image, label_image)
        psnr2 = myutils.psnr(output_image, label_image)
        # SSIM is calculated on the normalized (range [0, 1]) images
        ssim1 = torch.sum(
            (myutils.ssim(input_image, label_image,
                          size_average=False)).data) / 1.0  # batch size is 1 at test time
        ssim2 = torch.sum((myutils.ssim(
            output_image, label_image, size_average=False)).data) / 1.0

        avg_ssim1 += ssim1
        avg_psnr1 += psnr1
        avg_ssim2 += ssim2
        avg_psnr2 += psnr2

        #save output and record
        checkpoint(name, psnr1, psnr2, ssim1, ssim2)
        save(output_image, name)

    #print and save
    avg_psnr1 = avg_psnr1 / len(dataloader)
    avg_ssim1 = avg_ssim1 / len(dataloader)
    avg_psnr2 = avg_psnr2 / len(dataloader)
    avg_ssim2 = avg_ssim2 / len(dataloader)

    print('Avg. PSNR: {:.4f}->{:.4f} Avg. SSIM: {:.4f}->{:.4f}'.format(
        avg_psnr1, avg_psnr2, avg_ssim1, avg_ssim2))
    output = open(os.path.join(Image_folder, 'test_result.txt'), 'a+')
    output.write('Avg. PSNR: {:.4f}->{:.4f} Avg. SSIM: {:.4f}->{:.4f}'.format(
        avg_psnr1, avg_psnr2, avg_ssim1, avg_ssim2) + '\r\n')
    output.close()
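
# Variant 2: plain single-scale evaluation; returns the average PSNR and SSIM
# over the test dataloader.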
def test():
    model.eval()
    avg_psnr = 0
    avg_ssim = 0
    for iteration, sample in enumerate(test_dataloader):
        inputs, label = sample['inputs'], sample['label']
        #Wrap with torch Variable
        inputs, label = wrap_variable(inputs, label, use_gpu)
        outputs = model(inputs)

        psnr = myutils.psnr(outputs, label)
        ssim = torch.sum((myutils.ssim(outputs, label, size_average=False)).data) / args.testbatchsize
        avg_ssim += ssim
        avg_psnr += psnr

    return (avg_psnr / len(test_dataloader)), (avg_ssim / len(test_dataloader))
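
# Variant 3: multi-scale evaluation that additionally tracks the criterion
# (MSE) loss at each scale, combining the scales with weights 1.0 / 0.5 / 0.25,
# and returns the average PSNR, SSIM, combined MSE, and per-scale MSEs.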
def test():
    model.eval()
    avg_psnr = 0
    avg_ssim = 0
    avg_mse = 0
    avg_mse_0 = 0
    avg_mse_1 = 0
    avg_mse_2 = 0

    for iteration, sample in enumerate(test_dataloader):
        inputs, label = sample['inputs'], sample['label']
        #Wrap with torch Variable
        inputs, label = wrap_variable(inputs, label, use_gpu, True)

        inputs_1 = downsampling(inputs)
        label_1 = downsampling(label)
        inputs_2 = downsampling(inputs_1)
        label_2 = downsampling(label_1)

        outputs, outputs_1, outputs_2 = model(inputs, inputs_1, inputs_2)

        mse_0 = criterion(outputs, label).data[0]
        mse_1 = criterion(outputs_1, label_1).data[0]
        mse_2 = criterion(outputs_2, label_2).data[0]
        psnr = myutils.psnr(outputs, label)
        ssim = torch.sum((myutils.ssim(
            outputs, label, size_average=False)).data) / args.testbatchsize
        avg_ssim += ssim
        avg_psnr += psnr
        avg_mse += mse_0 + mse_1 * 0.5 + mse_2 * 0.25
        avg_mse_0 += mse_0
        avg_mse_1 += mse_1
        avg_mse_2 += mse_2

    return (avg_psnr / len(test_dataloader),
            avg_ssim / len(test_dataloader),
            avg_mse / len(test_dataloader),
            avg_mse_0 / len(test_dataloader),
            avg_mse_1 / len(test_dataloader),
            avg_mse_2 / len(test_dataloader))
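
# Variant 4: validation loop for a generator netG (GAN-style training).
# It accumulates batch-size-weighted MSE and SSIM, converts the mean MSE to
# PSNR via 10 * log10(1 / mse), and returns PSNR, SSIM, and mean MSE.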
def test():
    netG.eval()
    valing_results = {'mse': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0, 'batch_sizes': 0}

    for iteration, sample in enumerate(test_dataloader):
        inputs, label = sample['inputs'], sample['label']
        batch_size = inputs.size(0)
        valing_results['batch_sizes'] += batch_size

        #Wrap with torch Variable
        inputs, label = wrap_variable(inputs, label, use_gpu, True)
        #get the output of netG
        outputs = netG(inputs)
        # calculate the per-batch metrics
        batch_mse = ((outputs - label) ** 2).data.mean()
        valing_results['mse'] += batch_mse * batch_size
        batch_ssim = myutils.ssim(outputs, label).data[0]
        valing_results['ssims'] += batch_ssim * batch_size  # batch-weighted sum; divided by batch_sizes below
    
    valing_results['psnr'] = 10 * log10(1 / (valing_results['mse'] / valing_results['batch_sizes']))
    valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']

    return valing_results['psnr'], valing_results['ssim'], valing_results['mse'] / valing_results['batch_sizes']
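
# Variant 5: the model additionally takes VGG relu4_3 features of the
# normalized input; returns the average PSNR, SSIM, and MSE.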
def test():
    model.eval()
    avg_psnr = 0
    avg_ssim = 0
    avg_mse = 0
    for iteration, sample in enumerate(test_dataloader):
        inputs, label = sample['inputs'], sample['label']
        #Wrap with torch Variable
        inputs, label = wrap_variable(inputs, label, use_gpu, True)

        # extract VGG relu4_3 features from the normalized input
        vgg_feature = vgg(myutils.normalize_batch(inputs)).relu4_3

        outputs = model(inputs, vgg_feature)
        mse = criterion(outputs, label).data[0]
        psnr = myutils.psnr(outputs, label)
        ssim = torch.sum((myutils.ssim(
            outputs, label, size_average=False)).data) / args.testbatchsize
        avg_ssim += ssim
        avg_psnr += psnr
        avg_mse += mse
    return (avg_psnr / len(test_dataloader)), (
        avg_ssim / len(test_dataloader)), (avg_mse / len(test_dataloader))
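
# Variant 6: full-image evaluation by tiling the input into 120x120 patches
# (with extra passes over the right and bottom borders), stitching the patch
# outputs back into a full image, and reporting PSNR, SSIM, and MS-SSIM for
# both the input and the reconstructed output; results are saved and appended
# to test_result.txt.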
def test():
    model.eval()
    avg_psnr1 = 0
    avg_ssim1 = 0
    avg_msssim1 = 0
    #output and label
    avg_psnr2 = 0
    avg_ssim2 = 0
    avg_msssim2 = 0

    for i, sample in enumerate(dataloader):
        input_image, label_image, name = sample['input_image'], sample[
            'label_image'], sample['name'][0]  #tuple to str

        ####################################
        #Wrap with torch Variable
        input_image, label_image = wrap_variable(input_image, label_image,
                                                 use_gpu, True)

        ###############################

        # patch-based inference parameters
        patch_size = 120
        batch_number, channel, height, width = input_image.size()
        row_numbers = int(height / patch_size)
        col_numbers = int(width / patch_size)
        # create an empty tensor to hold the stitched output patches
        output_image = Variable(torch.zeros(batch_number, channel, height,
                                            width).cuda(),
                                volatile=True)

        # interior grid of patches
        for row_number in range(row_numbers):
            for col_number in range(col_numbers):
                row_start = patch_size * row_number
                row_end = patch_size * (row_number + 1)
                col_start = patch_size * col_number
                col_end = patch_size * (col_number + 1)

                input_patch = input_image[:, :, row_start:row_end,
                                          col_start:col_end]

                #estimate the output
                output_patch = MS(model, input_patch)

                output_image[:, :, row_start:row_end,
                             col_start:col_end] = output_patch

        # right-border column of patches
        for row_number in range(row_numbers):
            row_start = patch_size * row_number
            row_end = patch_size * (row_number + 1)
            col_start = width - patch_size
            col_end = width
            input_patch = input_image[:, :, row_start:row_end,
                                      col_start:col_end]

            #estimate the output
            output_patch = MS(model, input_patch)
            output_image[:, :, row_start:row_end,
                         col_start:col_end] = output_patch

        # bottom-border row of patches
        for col_number in range(col_numbers):
            row_start = height - patch_size
            row_end = height
            col_start = patch_size * col_number
            col_end = patch_size * (col_number + 1)
            input_patch = input_image[:, :, row_start:row_end,
                                      col_start:col_end]

            #estimate the output
            output_patch = MS(model, input_patch)
            output_image[:, :, row_start:row_end,
                         col_start:col_end] = output_patch

        # bottom-right corner patch
        row_start = height - patch_size
        row_end = height
        col_start = width - patch_size
        col_end = width
        input_patch = input_image[:, :, row_start:row_end, col_start:col_end]
        #estimate the output
        output_patch = MS(model, input_patch)
        output_image[:, :, row_start:row_end, col_start:col_end] = output_patch
        #######################################

        ##################################

        # clamp the output to [0, 1]
        output_image = output_image.clamp(0.0, 1.0)

        #calculate psnr
        psnr1 = myutils.psnr(input_image, label_image)
        psnr2 = myutils.psnr(output_image, label_image)
        # SSIM is calculated on the normalized (range [0, 1]) images
        ssim1 = torch.sum(
            (myutils.ssim(input_image, label_image,
                          size_average=False)).data) / 1.0  # batch size is 1 at test time
        ssim2 = torch.sum((myutils.ssim(
            output_image, label_image, size_average=False)).data) / 1.0

        #msssim
        msssim1 = numpy.sum((msssim.MultiScaleSSIM(
            numpy.expand_dims(input_image.data[0].clone().cpu().numpy(),
                              axis=0),
            numpy.expand_dims(label_image.data[0].clone().cpu().numpy(),
                              axis=0),
            max_val=1.0))) / 1.0  # batch size is 1 at test time
        msssim2 = numpy.sum((msssim.MultiScaleSSIM(
            numpy.expand_dims(output_image.data[0].clone().cpu().numpy(),
                              axis=0),
            numpy.expand_dims(label_image.data[0].clone().cpu().numpy(),
                              axis=0),
            max_val=1.0))) / 1.0

        avg_ssim1 += ssim1
        avg_psnr1 += psnr1
        avg_ssim2 += ssim2
        avg_psnr2 += psnr2
        avg_msssim1 += msssim1
        avg_msssim2 += msssim2

        #save output and record
        checkpoint(name, psnr1, psnr2, ssim1, ssim2, msssim1, msssim2)
        save(output_image, name)

    #print and save
    avg_psnr1 = avg_psnr1 / len(dataloader)
    avg_ssim1 = avg_ssim1 / len(dataloader)
    avg_psnr2 = avg_psnr2 / len(dataloader)
    avg_ssim2 = avg_ssim2 / len(dataloader)
    avg_msssim1 = avg_msssim1 / len(dataloader)
    avg_msssim2 = avg_msssim2 / len(dataloader)

    print(
        'Avg. PSNR: {:.4f}->{:.4f} Avg. SSIM: {:.4f}->{:.4f} Avg. MSSSIM: {:.4f}->{:.4f}'
        .format(avg_psnr1, avg_psnr2, avg_ssim1, avg_ssim2, avg_msssim1,
                avg_msssim2))
    output = open(os.path.join(Image_folder, 'test_result.txt'), 'a+')
    output.write(
        'Avg. PSNR: {:.4f}->{:.4f} Avg. SSIM: {:.4f}->{:.4f} Avg. MSSSIM: {:.4f}->{:.4f}'
        .format(avg_psnr1, avg_psnr2, avg_ssim1, avg_ssim2, avg_msssim1,
                avg_msssim2) + '\r\n')
    output.close()
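
# The snippets above rely on project helpers (wrap_variable, downsampling) that
# are not shown in this file. The definitions below are only a minimal sketch of
# what they might look like, written against the legacy torch.autograd.Variable
# API used above; the project's actual implementations may differ.
import torch.nn.functional as F
from torch.autograd import Variable


def wrap_variable(input_tensor, label_tensor, use_gpu, volatile=False):
    # Hypothetical helper matching the call sites above: move both tensors to
    # the GPU when requested and wrap them as (optionally volatile) Variables,
    # as pre-0.4 PyTorch required for inference without gradient tracking.
    if use_gpu:
        input_tensor, label_tensor = input_tensor.cuda(), label_tensor.cuda()
    return (Variable(input_tensor, volatile=volatile),
            Variable(label_tensor, volatile=volatile))


def downsampling(x):
    # Hypothetical stand-in for the pyramid downsampling step; 2x average
    # pooling is one plausible choice, but the original may use a different
    # operator (e.g. strided convolution or bicubic resizing).
    return F.avg_pool2d(x, kernel_size=2)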